qemu/hw/intc/arm_gicv3_cpuif.c
   1/*
   2 * ARM Generic Interrupt Controller v3
   3 *
   4 * Copyright (c) 2016 Linaro Limited
   5 * Written by Peter Maydell
   6 *
   7 * This code is licensed under the GPL, version 2 or (at your option)
   8 * any later version.
   9 */
  10
  11/* This file contains the code for the system register interface
  12 * portions of the GICv3.
  13 */
  14
  15#include "qemu/osdep.h"
  16#include "qemu/bitops.h"
  17#include "qemu/log.h"
  18#include "qemu/main-loop.h"
  19#include "trace.h"
  20#include "gicv3_internal.h"
  21#include "hw/irq.h"
  22#include "cpu.h"
  23
  24void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s)
  25{
  26    ARMCPU *arm_cpu = ARM_CPU(cpu);
  27    CPUARMState *env = &arm_cpu->env;
  28
  29    env->gicv3state = (void *)s;
   30}
  31
  32static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
  33{
  34    return env->gicv3state;
  35}
  36
  37static bool gicv3_use_ns_bank(CPUARMState *env)
  38{
  39    /* Return true if we should use the NonSecure bank for a banked GIC
  40     * CPU interface register. Note that this differs from the
  41     * access_secure_reg() function because GICv3 banked registers are
  42     * banked even for AArch64, unlike the other CPU system registers.
  43     */
  44    return !arm_is_secure_below_el3(env);
  45}
  46
  47/* The minimum BPR for the virtual interface is a configurable property */
  48static inline int icv_min_vbpr(GICv3CPUState *cs)
  49{
  50    return 7 - cs->vprebits;
  51}
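
/* For example (illustrative numbers only, the vprebits property is
 * configurable): with vprebits == 5, icv_min_vbpr() returns 2, so the
 * virtual BPR0 can take the values 2..7; write_vbpr() below raises the
 * minimum by one for group 1, giving VBPR1 a range of 3..7.
 */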
  52
  53/* Simple accessor functions for LR fields */
  54static uint32_t ich_lr_vintid(uint64_t lr)
  55{
  56    return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
  57}
  58
  59static uint32_t ich_lr_pintid(uint64_t lr)
  60{
  61    return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
  62}
  63
  64static uint32_t ich_lr_prio(uint64_t lr)
  65{
  66    return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
  67}
  68
  69static int ich_lr_state(uint64_t lr)
  70{
  71    return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
  72}
  73
  74static bool icv_access(CPUARMState *env, int hcr_flags)
  75{
  76    /* Return true if this ICC_ register access should really be
  77     * directed to an ICV_ access. hcr_flags is a mask of
  78     * HCR_EL2 bits to check: we treat this as an ICV_ access
  79     * if we are in NS EL1 and at least one of the specified
  80     * HCR_EL2 bits is set.
  81     *
   82     * ICV registers fall into three categories:
  83     *  * access if NS EL1 and HCR_EL2.FMO == 1:
  84     *    all ICV regs with '0' in their name
  85     *  * access if NS EL1 and HCR_EL2.IMO == 1:
  86     *    all ICV regs with '1' in their name
  87     *  * access if NS EL1 and either IMO or FMO == 1:
  88     *    CTLR, DIR, PMR, RPR
  89     */
  90    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
  91    bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
  92
  93    return flagmatch && arm_current_el(env) == 1
  94        && !arm_is_secure_below_el3(env);
  95}
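
/* Usage note: callers pass the HCR_EL2 bit(s) relevant to the register
 * being accessed; in this file icc_iar0_read() and icc_hppir0_read()
 * pass HCR_FMO, icc_iar1_read() and icc_hppir1_read() pass HCR_IMO,
 * and the shared registers (for instance icc_pmr_read(), icc_rpr_read()
 * and icc_dir_write()) pass HCR_FMO | HCR_IMO.
 */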
  96
  97static int read_vbpr(GICv3CPUState *cs, int grp)
  98{
  99    /* Read VBPR value out of the VMCR field (caller must handle
 100     * VCBPR effects if required)
 101     */
 102    if (grp == GICV3_G0) {
 103        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
  104                         ICH_VMCR_EL2_VBPR0_LENGTH);
 105    } else {
 106        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
 107                         ICH_VMCR_EL2_VBPR1_LENGTH);
 108    }
 109}
 110
 111static void write_vbpr(GICv3CPUState *cs, int grp, int value)
 112{
  113    /* Write new VBPR value, handling the "writing a value less than
 114     * the minimum sets it to the minimum" semantics.
 115     */
 116    int min = icv_min_vbpr(cs);
 117
 118    if (grp != GICV3_G0) {
 119        min++;
 120    }
 121
 122    value = MAX(value, min);
 123
 124    if (grp == GICV3_G0) {
 125        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
 126                                     ICH_VMCR_EL2_VBPR0_LENGTH, value);
 127    } else {
 128        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
 129                                     ICH_VMCR_EL2_VBPR1_LENGTH, value);
 130    }
 131}
 132
 133static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
 134{
 135    /* Return a mask word which clears the unimplemented priority bits
 136     * from a priority value for a virtual interrupt. (Not to be confused
 137     * with the group priority, whose mask depends on the value of VBPR
 138     * for the interrupt group.)
 139     */
 140    return ~0U << (8 - cs->vpribits);
 141}
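
/* For example (illustrative value): with vpribits == 5 this mask is
 * ~0U << 3, so only priority bits [7:3] are implemented and a guest
 * write of 0x43 to ICV_PMR is stored as 0x40 by icv_pmr_write().
 */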
 142
 143static int ich_highest_active_virt_prio(GICv3CPUState *cs)
 144{
 145    /* Calculate the current running priority based on the set bits
 146     * in the ICH Active Priority Registers.
 147     */
 148    int i;
 149    int aprmax = 1 << (cs->vprebits - 5);
 150
 151    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
 152
 153    for (i = 0; i < aprmax; i++) {
 154        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
 155            cs->ich_apr[GICV3_G1NS][i];
 156
 157        if (!apr) {
 158            continue;
 159        }
 160        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
 161    }
 162    /* No current active interrupts: return idle priority */
 163    return 0xff;
 164}
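
/* For example (illustrative value): with vprebits == 5 there is a
 * single 32-bit active-priority word per group (aprmax == 1) and the
 * shift is 8 - vprebits == 3, so the lowest set bit n in the combined
 * word corresponds to a running priority of n << 3 (bit 4 set means a
 * running priority of 0x20).
 */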
 165
 166static int hppvi_index(GICv3CPUState *cs)
 167{
 168    /* Return the list register index of the highest priority pending
 169     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
 170     * pseudocode. If no pending virtual interrupts, return -1.
 171     */
 172    int idx = -1;
 173    int i;
 174    /* Note that a list register entry with a priority of 0xff will
 175     * never be reported by this function; this is the architecturally
 176     * correct behaviour.
 177     */
 178    int prio = 0xff;
 179
 180    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
 181        /* Both groups disabled, definitely nothing to do */
 182        return idx;
 183    }
 184
 185    for (i = 0; i < cs->num_list_regs; i++) {
 186        uint64_t lr = cs->ich_lr_el2[i];
 187        int thisprio;
 188
 189        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
 190            /* Not Pending */
 191            continue;
 192        }
 193
 194        /* Ignore interrupts if relevant group enable not set */
 195        if (lr & ICH_LR_EL2_GROUP) {
 196            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
 197                continue;
 198            }
 199        } else {
 200            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
 201                continue;
 202            }
 203        }
 204
 205        thisprio = ich_lr_prio(lr);
 206
 207        if (thisprio < prio) {
 208            prio = thisprio;
 209            idx = i;
 210        }
 211    }
 212
 213    return idx;
 214}
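
/* Note that a numerically lower LR priority value means a higher
 * priority, and the strict '<' comparison above means that when two
 * list registers hold equal-priority pending interrupts the one with
 * the lower index is chosen.
 */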
 215
 216static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
 217{
 218    /* Return a mask word which clears the subpriority bits from
 219     * a priority value for a virtual interrupt in the specified group.
 220     * This depends on the VBPR value.
 221     * If using VBPR0 then:
 222     *  a BPR of 0 means the group priority bits are [7:1];
 223     *  a BPR of 1 means they are [7:2], and so on down to
 224     *  a BPR of 7 meaning no group priority bits at all.
 225     * If using VBPR1 then:
 226     *  a BPR of 0 is impossible (the minimum value is 1)
 227     *  a BPR of 1 means the group priority bits are [7:1];
 228     *  a BPR of 2 means they are [7:2], and so on down to
 229     *  a BPR of 7 meaning the group priority is [7].
 230     *
 231     * Which BPR to use depends on the group of the interrupt and
 232     * the current ICH_VMCR_EL2.VCBPR settings.
 233     *
 234     * This corresponds to the VGroupBits() pseudocode.
 235     */
 236    int bpr;
 237
 238    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
 239        group = GICV3_G0;
 240    }
 241
 242    bpr = read_vbpr(cs, group);
 243    if (group == GICV3_G1NS) {
 244        assert(bpr > 0);
 245        bpr--;
 246    }
 247
 248    return ~0U << (bpr + 1);
 249}
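
/* For example: a VBPR0 of 2 yields a mask of ~0U << 3, so the group
 * priority is bits [7:3] and priorities 0x40 and 0x47 fall into the
 * same preemption group; for group 1 the stored VBPR1 is decremented
 * first, so a VBPR1 of 3 produces the same [7:3] split.
 */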
 250
 251static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
 252{
 253    /* Return true if we can signal this virtual interrupt defined by
 254     * the given list register value; see the pseudocode functions
 255     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
 256     * Compare also icc_hppi_can_preempt() which is the non-virtual
 257     * equivalent of these checks.
 258     */
 259    int grp;
 260    uint32_t mask, prio, rprio, vpmr;
 261
 262    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
 263        /* Virtual interface disabled */
 264        return false;
 265    }
 266
 267    /* We don't need to check that this LR is in Pending state because
 268     * that has already been done in hppvi_index().
 269     */
 270
 271    prio = ich_lr_prio(lr);
 272    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
 273                     ICH_VMCR_EL2_VPMR_LENGTH);
 274
 275    if (prio >= vpmr) {
 276        /* Priority mask masks this interrupt */
 277        return false;
 278    }
 279
 280    rprio = ich_highest_active_virt_prio(cs);
 281    if (rprio == 0xff) {
 282        /* No running interrupt so we can preempt */
 283        return true;
 284    }
 285
 286    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
 287
 288    mask = icv_gprio_mask(cs, grp);
 289
 290    /* We only preempt a running interrupt if the pending interrupt's
 291     * group priority is sufficient (the subpriorities are not considered).
 292     */
 293    if ((prio & mask) < (rprio & mask)) {
 294        return true;
 295    }
 296
 297    return false;
 298}
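
/* Worked example of the final check above: with a group-priority mask
 * of 0xF0, a pending priority of 0x28 and a running priority of 0x30
 * compare as 0x20 < 0x30, so the pending interrupt preempts; a pending
 * priority of 0x38 would not, because 0x30 is not < 0x30.
 */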
 299
 300static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
 301                                                uint32_t *misr)
 302{
 303    /* Return a set of bits indicating the EOI maintenance interrupt status
 304     * for each list register. The EOI maintenance interrupt status is
 305     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
 306     * (see the GICv3 spec for the ICH_EISR_EL2 register).
 307     * If misr is not NULL then we should also collect the information
 308     * about the MISR.EOI, MISR.NP and MISR.U bits.
 309     */
 310    uint32_t value = 0;
 311    int validcount = 0;
 312    bool seenpending = false;
 313    int i;
 314
 315    for (i = 0; i < cs->num_list_regs; i++) {
 316        uint64_t lr = cs->ich_lr_el2[i];
 317
 318        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
 319            == ICH_LR_EL2_EOI) {
 320            value |= (1 << i);
 321        }
 322        if ((lr & ICH_LR_EL2_STATE_MASK)) {
 323            validcount++;
 324        }
 325        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
 326            seenpending = true;
 327        }
 328    }
 329
 330    if (misr) {
 331        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
 332            *misr |= ICH_MISR_EL2_U;
 333        }
 334        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
 335            *misr |= ICH_MISR_EL2_NP;
 336        }
 337        if (value) {
 338            *misr |= ICH_MISR_EL2_EOI;
 339        }
 340    }
 341    return value;
 342}
 343
 344static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
 345{
 346    /* Return a set of bits indicating the maintenance interrupt status
 347     * (as seen in the ICH_MISR_EL2 register).
 348     */
 349    uint32_t value = 0;
 350
 351    /* Scan list registers and fill in the U, NP and EOI bits */
 352    eoi_maintenance_interrupt_state(cs, &value);
 353
  354    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) && (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
 355        value |= ICH_MISR_EL2_LRENP;
 356    }
 357
 358    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
 359        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
 360        value |= ICH_MISR_EL2_VGRP0E;
 361    }
 362
 363    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
  364        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
 365        value |= ICH_MISR_EL2_VGRP0D;
 366    }
 367    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
 368        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
 369        value |= ICH_MISR_EL2_VGRP1E;
 370    }
 371
 372    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
 373        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
 374        value |= ICH_MISR_EL2_VGRP1D;
 375    }
 376
 377    return value;
 378}
 379
 380static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
 381{
 382    /* Tell the CPU about any pending virtual interrupts or
 383     * maintenance interrupts, following a change to the state
 384     * of the CPU interface relevant to virtual interrupts.
 385     *
 386     * CAUTION: this function will call qemu_set_irq() on the
 387     * CPU maintenance IRQ line, which is typically wired up
 388     * to the GIC as a per-CPU interrupt. This means that it
 389     * will recursively call back into the GIC code via
 390     * gicv3_redist_set_irq() and thus into the CPU interface code's
 391     * gicv3_cpuif_update(). It is therefore important that this
 392     * function is only called as the final action of a CPU interface
 393     * register write implementation, after all the GIC state
  394     * fields have been updated. gicv3_cpuif_update() must likewise
  395     * not end up calling back into this function; that holds
  396     * naturally, because the physical and virtual interface state
  397     * is maintained independently.
 398     */
 399    int idx;
 400    int irqlevel = 0;
 401    int fiqlevel = 0;
 402    int maintlevel = 0;
 403    ARMCPU *cpu = ARM_CPU(cs->cpu);
 404
 405    idx = hppvi_index(cs);
 406    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
 407    if (idx >= 0) {
 408        uint64_t lr = cs->ich_lr_el2[idx];
 409
 410        if (icv_hppi_can_preempt(cs, lr)) {
 411            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
 412            if (lr & ICH_LR_EL2_GROUP) {
 413                irqlevel = 1;
 414            } else {
 415                fiqlevel = 1;
 416            }
 417        }
 418    }
 419
 420    if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
 421        maintlevel = maintenance_interrupt_state(cs);
 422    }
 423
 424    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
 425                                    irqlevel, maintlevel);
 426
 427    qemu_set_irq(cs->parent_vfiq, fiqlevel);
 428    qemu_set_irq(cs->parent_virq, irqlevel);
 429    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
 430}
 431
 432static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
 433{
 434    GICv3CPUState *cs = icc_cs_from_env(env);
 435    int regno = ri->opc2 & 3;
 436    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
 437    uint64_t value = cs->ich_apr[grp][regno];
 438
 439    trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
 440    return value;
 441}
 442
 443static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
 444                         uint64_t value)
 445{
 446    GICv3CPUState *cs = icc_cs_from_env(env);
 447    int regno = ri->opc2 & 3;
 448    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
 449
 450    trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
 451
 452    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
 453
 454    gicv3_cpuif_virt_update(cs);
 455    return;
 456}
 457
 458static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 459{
 460    GICv3CPUState *cs = icc_cs_from_env(env);
 461    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
 462    uint64_t bpr;
 463    bool satinc = false;
 464
 465    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
 466        /* reads return bpr0 + 1 saturated to 7, writes ignored */
 467        grp = GICV3_G0;
 468        satinc = true;
 469    }
 470
 471    bpr = read_vbpr(cs, grp);
 472
 473    if (satinc) {
 474        bpr++;
 475        bpr = MIN(bpr, 7);
 476    }
 477
 478    trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
 479
 480    return bpr;
 481}
 482
 483static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 484                          uint64_t value)
 485{
 486    GICv3CPUState *cs = icc_cs_from_env(env);
 487    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
 488
 489    trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
 490                              gicv3_redist_affid(cs), value);
 491
 492    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
 493        /* reads return bpr0 + 1 saturated to 7, writes ignored */
 494        return;
 495    }
 496
 497    write_vbpr(cs, grp, value);
 498
 499    gicv3_cpuif_virt_update(cs);
 500}
 501
 502static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 503{
 504    GICv3CPUState *cs = icc_cs_from_env(env);
 505    uint64_t value;
 506
 507    value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
 508                      ICH_VMCR_EL2_VPMR_LENGTH);
 509
 510    trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
 511    return value;
 512}
 513
 514static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 515                          uint64_t value)
 516{
 517    GICv3CPUState *cs = icc_cs_from_env(env);
 518
 519    trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
 520
 521    value &= icv_fullprio_mask(cs);
 522
 523    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
 524                                 ICH_VMCR_EL2_VPMR_LENGTH, value);
 525
 526    gicv3_cpuif_virt_update(cs);
 527}
 528
 529static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
 530{
 531    GICv3CPUState *cs = icc_cs_from_env(env);
 532    int enbit;
 533    uint64_t value;
 534
 535    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
 536    value = extract64(cs->ich_vmcr_el2, enbit, 1);
 537
 538    trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
 539                                gicv3_redist_affid(cs), value);
 540    return value;
 541}
 542
 543static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
 544                             uint64_t value)
 545{
 546    GICv3CPUState *cs = icc_cs_from_env(env);
 547    int enbit;
 548
 549    trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
 550                                 gicv3_redist_affid(cs), value);
 551
 552    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
 553
 554    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
 555    gicv3_cpuif_virt_update(cs);
 556}
 557
 558static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 559{
 560    GICv3CPUState *cs = icc_cs_from_env(env);
 561    uint64_t value;
 562
 563    /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
 564     * should match the ones reported in ich_vtr_read().
 565     */
 566    value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
 567        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
 568
 569    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
 570        value |= ICC_CTLR_EL1_EOIMODE;
 571    }
 572
 573    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
 574        value |= ICC_CTLR_EL1_CBPR;
 575    }
 576
 577    trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
 578    return value;
 579}
 580
 581static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 582                               uint64_t value)
 583{
 584    GICv3CPUState *cs = icc_cs_from_env(env);
 585
 586    trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
 587
 588    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
 589                                 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
 590    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
 591                                 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
 592
 593    gicv3_cpuif_virt_update(cs);
 594}
 595
 596static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 597{
 598    GICv3CPUState *cs = icc_cs_from_env(env);
 599    int prio = ich_highest_active_virt_prio(cs);
 600
 601    trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
 602    return prio;
 603}
 604
 605static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
 606{
 607    GICv3CPUState *cs = icc_cs_from_env(env);
 608    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
 609    int idx = hppvi_index(cs);
 610    uint64_t value = INTID_SPURIOUS;
 611
 612    if (idx >= 0) {
 613        uint64_t lr = cs->ich_lr_el2[idx];
 614        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
 615
 616        if (grp == thisgrp) {
 617            value = ich_lr_vintid(lr);
 618        }
 619    }
 620
 621    trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
 622    return value;
 623}
 624
 625static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
 626{
 627    /* Activate the interrupt in the specified list register
 628     * by moving it from Pending to Active state, and update the
 629     * Active Priority Registers.
 630     */
 631    uint32_t mask = icv_gprio_mask(cs, grp);
 632    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
 633    int aprbit = prio >> (8 - cs->vprebits);
 634    int regno = aprbit / 32;
 635    int regbit = aprbit % 32;
 636
 637    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
 638    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
 639    cs->ich_apr[grp][regno] |= (1 << regbit);
 640}
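
/* For example (illustrative value): with vprebits == 5 a masked group
 * priority of 0x40 gives aprbit == 0x40 >> 3 == 8, so bit 8 of
 * ich_apr[grp][0] is set; ich_highest_active_virt_prio() will then
 * report 8 << 3 == 0x40 as the running priority.
 */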
 641
 642static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
 643{
 644    GICv3CPUState *cs = icc_cs_from_env(env);
 645    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
 646    int idx = hppvi_index(cs);
 647    uint64_t intid = INTID_SPURIOUS;
 648
 649    if (idx >= 0) {
 650        uint64_t lr = cs->ich_lr_el2[idx];
 651        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
 652
 653        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
 654            intid = ich_lr_vintid(lr);
 655            if (intid < INTID_SECURE) {
 656                icv_activate_irq(cs, idx, grp);
 657            } else {
 658                /* Interrupt goes from Pending to Invalid */
 659                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
 660                /* We will now return the (bogus) ID from the list register,
 661                 * as per the pseudocode.
 662                 */
 663            }
 664        }
 665    }
 666
 667    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
 668                             gicv3_redist_affid(cs), intid);
 669
 670    gicv3_cpuif_virt_update(cs);
 671
 672    return intid;
 673}
 674
 675static int icc_highest_active_prio(GICv3CPUState *cs)
 676{
 677    /* Calculate the current running priority based on the set bits
 678     * in the Active Priority Registers.
 679     */
 680    int i;
 681
 682    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
 683        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
 684            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
 685
 686        if (!apr) {
 687            continue;
 688        }
 689        return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
 690    }
 691    /* No current active interrupts: return idle priority */
 692    return 0xff;
 693}
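
/* Since GIC_MIN_BPR is 0 for the physical interface, the shift in
 * icc_highest_active_prio() above is 1, so bit n of icc_apr[group][i]
 * corresponds to a running priority of (i * 32 + n) * 2 and the APR
 * words together cover the even priority values 0x00..0xFE.
 */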
 694
 695static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
 696{
 697    /* Return a mask word which clears the subpriority bits from
 698     * a priority value for an interrupt in the specified group.
 699     * This depends on the BPR value. For CBPR0 (S or NS):
 700     *  a BPR of 0 means the group priority bits are [7:1];
 701     *  a BPR of 1 means they are [7:2], and so on down to
 702     *  a BPR of 7 meaning no group priority bits at all.
 703     * For CBPR1 NS:
 704     *  a BPR of 0 is impossible (the minimum value is 1)
 705     *  a BPR of 1 means the group priority bits are [7:1];
 706     *  a BPR of 2 means they are [7:2], and so on down to
 707     *  a BPR of 7 meaning the group priority is [7].
 708     *
 709     * Which BPR to use depends on the group of the interrupt and
 710     * the current ICC_CTLR.CBPR settings.
 711     *
 712     * This corresponds to the GroupBits() pseudocode.
 713     */
 714    int bpr;
 715
 716    if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
 717        (group == GICV3_G1NS &&
 718         cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
 719        group = GICV3_G0;
 720    }
 721
 722    bpr = cs->icc_bpr[group] & 7;
 723
 724    if (group == GICV3_G1NS) {
 725        assert(bpr > 0);
 726        bpr--;
 727    }
 728
 729    return ~0U << (bpr + 1);
 730}
 731
 732static bool icc_no_enabled_hppi(GICv3CPUState *cs)
 733{
 734    /* Return true if there is no pending interrupt, or the
 735     * highest priority pending interrupt is in a group which has been
 736     * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
 737     */
 738    return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
 739}
 740
 741static bool icc_hppi_can_preempt(GICv3CPUState *cs)
 742{
 743    /* Return true if we have a pending interrupt of sufficient
 744     * priority to preempt.
 745     */
 746    int rprio;
 747    uint32_t mask;
 748
 749    if (icc_no_enabled_hppi(cs)) {
 750        return false;
 751    }
 752
 753    if (cs->hppi.prio >= cs->icc_pmr_el1) {
 754        /* Priority mask masks this interrupt */
 755        return false;
 756    }
 757
 758    rprio = icc_highest_active_prio(cs);
 759    if (rprio == 0xff) {
 760        /* No currently running interrupt so we can preempt */
 761        return true;
 762    }
 763
 764    mask = icc_gprio_mask(cs, cs->hppi.grp);
 765
 766    /* We only preempt a running interrupt if the pending interrupt's
 767     * group priority is sufficient (the subpriorities are not considered).
 768     */
 769    if ((cs->hppi.prio & mask) < (rprio & mask)) {
 770        return true;
 771    }
 772
 773    return false;
 774}
 775
 776void gicv3_cpuif_update(GICv3CPUState *cs)
 777{
 778    /* Tell the CPU about its highest priority pending interrupt */
 779    int irqlevel = 0;
 780    int fiqlevel = 0;
 781    ARMCPU *cpu = ARM_CPU(cs->cpu);
 782    CPUARMState *env = &cpu->env;
 783
 784    g_assert(qemu_mutex_iothread_locked());
 785
 786    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
 787                             cs->hppi.grp, cs->hppi.prio);
 788
 789    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
 790        /* If a Security-enabled GIC sends a G1S interrupt to a
 791         * Security-disabled CPU, we must treat it as if it were G0.
 792         */
 793        cs->hppi.grp = GICV3_G0;
 794    }
 795
 796    if (icc_hppi_can_preempt(cs)) {
 797        /* We have an interrupt: should we signal it as IRQ or FIQ?
 798         * This is described in the GICv3 spec section 4.6.2.
 799         */
 800        bool isfiq;
 801
 802        switch (cs->hppi.grp) {
 803        case GICV3_G0:
 804            isfiq = true;
 805            break;
 806        case GICV3_G1:
 807            isfiq = (!arm_is_secure(env) ||
 808                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
 809            break;
 810        case GICV3_G1NS:
 811            isfiq = arm_is_secure(env);
 812            break;
 813        default:
 814            g_assert_not_reached();
 815        }
 816
 817        if (isfiq) {
 818            fiqlevel = 1;
 819        } else {
 820            irqlevel = 1;
 821        }
 822    }
 823
 824    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
 825
 826    qemu_set_irq(cs->parent_fiq, fiqlevel);
 827    qemu_set_irq(cs->parent_irq, irqlevel);
 828}
 829
 830static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 831{
 832    GICv3CPUState *cs = icc_cs_from_env(env);
 833    uint32_t value = cs->icc_pmr_el1;
 834
 835    if (icv_access(env, HCR_FMO | HCR_IMO)) {
 836        return icv_pmr_read(env, ri);
 837    }
 838
 839    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
 840        (env->cp15.scr_el3 & SCR_FIQ)) {
 841        /* NS access and Group 0 is inaccessible to NS: return the
 842         * NS view of the current priority
 843         */
 844        if ((value & 0x80) == 0) {
 845            /* Secure priorities not visible to NS */
 846            value = 0;
 847        } else if (value != 0xff) {
 848            value = (value << 1) & 0xff;
 849        }
 850    }
 851
 852    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
 853
 854    return value;
 855}
 856
 857static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 858                          uint64_t value)
 859{
 860    GICv3CPUState *cs = icc_cs_from_env(env);
 861
 862    if (icv_access(env, HCR_FMO | HCR_IMO)) {
 863        return icv_pmr_write(env, ri, value);
 864    }
 865
 866    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
 867
 868    value &= 0xff;
 869
 870    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
 871        (env->cp15.scr_el3 & SCR_FIQ)) {
  872        /* NS access and Group 0 is inaccessible to NS: the value
  873         * written is the NS view, so convert it to the Secure view
 874         */
 875        if (!(cs->icc_pmr_el1 & 0x80)) {
 876            /* Current PMR in the secure range, don't allow NS to change it */
 877            return;
 878        }
 879        value = (value >> 1) | 0x80;
 880    }
 881    cs->icc_pmr_el1 = value;
 882    gicv3_cpuif_update(cs);
 883}
 884
 885static void icc_activate_irq(GICv3CPUState *cs, int irq)
 886{
 887    /* Move the interrupt from the Pending state to Active, and update
 888     * the Active Priority Registers
 889     */
 890    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
 891    int prio = cs->hppi.prio & mask;
 892    int aprbit = prio >> 1;
 893    int regno = aprbit / 32;
 894    int regbit = aprbit % 32;
 895
 896    cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
 897
 898    if (irq < GIC_INTERNAL) {
 899        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
 900        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
 901        gicv3_redist_update(cs);
 902    } else {
 903        gicv3_gicd_active_set(cs->gic, irq);
 904        gicv3_gicd_pending_clear(cs->gic, irq);
 905        gicv3_update(cs->gic, irq, 1);
 906    }
 907}
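
/* For example: a group priority of 0x80 gives aprbit == 0x40, so bit 0
 * of icc_apr[grp][2] is set (regno == 64 / 32, regbit == 64 % 32).
 */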
 908
 909static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
 910{
 911    /* Return the highest priority pending interrupt register value
 912     * for group 0.
 913     */
 914    bool irq_is_secure;
 915
 916    if (cs->hppi.prio == 0xff) {
 917        return INTID_SPURIOUS;
 918    }
 919
 920    /* Check whether we can return the interrupt or if we should return
 921     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
 922     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
 923     * is always zero.)
 924     */
 925    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
 926                     (cs->hppi.grp != GICV3_G1NS));
 927
 928    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
 929        return INTID_SPURIOUS;
 930    }
 931    if (irq_is_secure && !arm_is_secure(env)) {
 932        /* Secure interrupts not visible to Nonsecure */
 933        return INTID_SPURIOUS;
 934    }
 935
 936    if (cs->hppi.grp != GICV3_G0) {
 937        /* Indicate to EL3 that there's a Group 1 interrupt for the other
 938         * state pending.
 939         */
 940        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
 941    }
 942
 943    return cs->hppi.irq;
 944}
 945
 946static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
 947{
 948    /* Return the highest priority pending interrupt register value
 949     * for group 1.
 950     */
 951    bool irq_is_secure;
 952
 953    if (cs->hppi.prio == 0xff) {
 954        return INTID_SPURIOUS;
 955    }
 956
 957    /* Check whether we can return the interrupt or if we should return
 958     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
 959     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
 960     * is always zero.)
 961     */
 962    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
 963                     (cs->hppi.grp != GICV3_G1NS));
 964
 965    if (cs->hppi.grp == GICV3_G0) {
 966        /* Group 0 interrupts not visible via HPPIR1 */
 967        return INTID_SPURIOUS;
 968    }
 969    if (irq_is_secure) {
 970        if (!arm_is_secure(env)) {
 971            /* Secure interrupts not visible in Non-secure */
 972            return INTID_SPURIOUS;
 973        }
 974    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
 975        /* Group 1 non-secure interrupts not visible in Secure EL1 */
 976        return INTID_SPURIOUS;
 977    }
 978
 979    return cs->hppi.irq;
 980}
 981
 982static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
 983{
 984    GICv3CPUState *cs = icc_cs_from_env(env);
 985    uint64_t intid;
 986
 987    if (icv_access(env, HCR_FMO)) {
 988        return icv_iar_read(env, ri);
 989    }
 990
 991    if (!icc_hppi_can_preempt(cs)) {
 992        intid = INTID_SPURIOUS;
 993    } else {
 994        intid = icc_hppir0_value(cs, env);
 995    }
 996
 997    if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
 998        icc_activate_irq(cs, intid);
 999    }
1000
1001    trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
1002    return intid;
1003}
1004
1005static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1006{
1007    GICv3CPUState *cs = icc_cs_from_env(env);
1008    uint64_t intid;
1009
1010    if (icv_access(env, HCR_IMO)) {
1011        return icv_iar_read(env, ri);
1012    }
1013
1014    if (!icc_hppi_can_preempt(cs)) {
1015        intid = INTID_SPURIOUS;
1016    } else {
1017        intid = icc_hppir1_value(cs, env);
1018    }
1019
1020    if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
1021        icc_activate_irq(cs, intid);
1022    }
1023
1024    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
1025    return intid;
1026}
1027
1028static void icc_drop_prio(GICv3CPUState *cs, int grp)
1029{
1030    /* Drop the priority of the currently active interrupt in
1031     * the specified group.
1032     *
1033     * Note that we can guarantee (because of the requirement to nest
1034     * ICC_IAR reads [which activate an interrupt and raise priority]
1035     * with ICC_EOIR writes [which drop the priority for the interrupt])
1036     * that the interrupt we're being called for is the highest priority
1037     * active interrupt, meaning that it has the lowest set bit in the
1038     * APR registers.
1039     *
1040     * If the guest does not honour the ordering constraints then the
1041     * behaviour of the GIC is UNPREDICTABLE, which for us means that
1042     * the values of the APR registers might become incorrect and the
1043     * running priority will be wrong, so interrupts that should preempt
1044     * might not do so, and interrupts that should not preempt might do so.
1045     */
1046    int i;
1047
1048    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
1049        uint64_t *papr = &cs->icc_apr[grp][i];
1050
1051        if (!*papr) {
1052            continue;
1053        }
1054        /* Clear the lowest set bit */
1055        *papr &= *papr - 1;
1056        break;
1057    }
1058
1059    /* running priority change means we need an update for this cpu i/f */
1060    gicv3_cpuif_update(cs);
1061}
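
/* The "*papr &= *papr - 1" trick above clears only the lowest set bit:
 * for example an APR word of 0x28 (bits 3 and 5 set) becomes 0x20,
 * dropping the running priority recorded by the lower of the two bits.
 */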
1062
1063static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1064{
1065    /* Return true if we should split priority drop and interrupt
1066     * deactivation, ie whether the relevant EOIMode bit is set.
1067     */
1068    if (arm_is_el3_or_mon(env)) {
1069        return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
1070    }
1071    if (arm_is_secure_below_el3(env)) {
1072        return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
1073    } else {
1074        return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
1075    }
1076}
1077
1078static int icc_highest_active_group(GICv3CPUState *cs)
1079{
1080    /* Return the group with the highest priority active interrupt.
1081     * We can do this by just comparing the APRs to see which one
1082     * has the lowest set bit.
1083     * (If more than one group is active at the same priority then
1084     * we're in UNPREDICTABLE territory.)
1085     */
1086    int i;
1087
1088    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
1089        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
1090        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
1091        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
1092
1093        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
1094            return GICV3_G1NS;
1095        }
1096        if (g1ctz < g0ctz) {
1097            return GICV3_G1;
1098        }
1099        if (g0ctz < 32) {
1100            return GICV3_G0;
1101        }
1102    }
1103    /* No set active bits? UNPREDICTABLE; return -1 so the caller
1104     * ignores the spurious EOI attempt.
1105     */
1106    return -1;
1107}
1108
1109static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
1110{
1111    if (irq < GIC_INTERNAL) {
1112        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
1113        gicv3_redist_update(cs);
1114    } else {
1115        gicv3_gicd_active_clear(cs->gic, irq);
1116        gicv3_update(cs->gic, irq, 1);
1117    }
1118}
1119
1120static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1121{
1122    /* Return true if we should split priority drop and interrupt
1123     * deactivation, ie whether the virtual EOIMode bit is set.
1124     */
1125    return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
1126}
1127
1128static int icv_find_active(GICv3CPUState *cs, int irq)
1129{
1130    /* Given an interrupt number for an active interrupt, return the index
1131     * of the corresponding list register, or -1 if there is no match.
1132     * Corresponds to FindActiveVirtualInterrupt pseudocode.
1133     */
1134    int i;
1135
1136    for (i = 0; i < cs->num_list_regs; i++) {
1137        uint64_t lr = cs->ich_lr_el2[i];
1138
1139        if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
1140            return i;
1141        }
1142    }
1143
1144    return -1;
1145}
1146
1147static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
1148{
1149    /* Deactivate the interrupt in the specified list register index */
1150    uint64_t lr = cs->ich_lr_el2[idx];
1151
1152    if (lr & ICH_LR_EL2_HW) {
1153        /* Deactivate the associated physical interrupt */
1154        int pirq = ich_lr_pintid(lr);
1155
1156        if (pirq < INTID_SECURE) {
1157            icc_deactivate_irq(cs, pirq);
1158        }
1159    }
1160
1161    /* Clear the 'active' part of the state, so ActivePending->Pending
1162     * and Active->Invalid.
1163     */
1164    lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
1165    cs->ich_lr_el2[idx] = lr;
1166}
1167
1168static void icv_increment_eoicount(GICv3CPUState *cs)
1169{
1170    /* Increment the EOICOUNT field in ICH_HCR_EL2 */
1171    int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1172                             ICH_HCR_EL2_EOICOUNT_LENGTH);
1173
1174    cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1175                                ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
1176}
1177
1178static int icv_drop_prio(GICv3CPUState *cs)
1179{
1180    /* Drop the priority of the currently active virtual interrupt
1181     * (favouring group 0 if there is a set active bit at
1182     * the same priority for both group 0 and group 1).
1183     * Return the priority value for the bit we just cleared,
1184     * or 0xff if no bits were set in the AP registers at all.
1185     * Note that though the ich_apr[] are uint64_t only the low
1186     * 32 bits are actually relevant.
1187     */
1188    int i;
1189    int aprmax = 1 << (cs->vprebits - 5);
1190
1191    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
1192
1193    for (i = 0; i < aprmax; i++) {
1194        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
1195        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
1196        int apr0count, apr1count;
1197
1198        if (!*papr0 && !*papr1) {
1199            continue;
1200        }
1201
1202        /* We can't just use the bit-twiddling hack icc_drop_prio() does
1203         * because we need to return the bit number we cleared so
1204         * it can be compared against the list register's priority field.
1205         */
1206        apr0count = ctz32(*papr0);
1207        apr1count = ctz32(*papr1);
1208
1209        if (apr0count <= apr1count) {
1210            *papr0 &= *papr0 - 1;
1211            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
1212        } else {
1213            *papr1 &= *papr1 - 1;
1214            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
1215        }
1216    }
1217    return 0xff;
1218}
1219
1220static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1221                          uint64_t value)
1222{
1223    /* Deactivate interrupt */
1224    GICv3CPUState *cs = icc_cs_from_env(env);
1225    int idx;
1226    int irq = value & 0xffffff;
1227
1228    trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
1229
1230    if (irq >= GICV3_MAXIRQ) {
1231        /* Also catches special interrupt numbers and LPIs */
1232        return;
1233    }
1234
1235    if (!icv_eoi_split(env, cs)) {
1236        return;
1237    }
1238
1239    idx = icv_find_active(cs, irq);
1240
1241    if (idx < 0) {
1242        /* No list register matching this, so increment the EOI count
1243         * (might trigger a maintenance interrupt)
1244         */
1245        icv_increment_eoicount(cs);
1246    } else {
1247        icv_deactivate_irq(cs, idx);
1248    }
1249
1250    gicv3_cpuif_virt_update(cs);
1251}
1252
1253static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1254                           uint64_t value)
1255{
1256    /* End of Interrupt */
1257    GICv3CPUState *cs = icc_cs_from_env(env);
1258    int irq = value & 0xffffff;
1259    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
1260    int idx, dropprio;
1261
1262    trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
1263                               gicv3_redist_affid(cs), value);
1264
1265    if (irq >= GICV3_MAXIRQ) {
1266        /* Also catches special interrupt numbers and LPIs */
1267        return;
1268    }
1269
1270    /* We implement the IMPDEF choice of "drop priority before doing
1271     * error checks" (because that lets us avoid scanning the AP
1272     * registers twice).
1273     */
1274    dropprio = icv_drop_prio(cs);
1275    if (dropprio == 0xff) {
1276        /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
1277         * whether the list registers are checked in this
1278         * situation; we choose not to.
1279         */
1280        return;
1281    }
1282
1283    idx = icv_find_active(cs, irq);
1284
1285    if (idx < 0) {
1286        /* No valid list register corresponding to EOI ID */
1287        icv_increment_eoicount(cs);
1288    } else {
1289        uint64_t lr = cs->ich_lr_el2[idx];
1290        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
1291        int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
1292
1293        if (thisgrp == grp && lr_gprio == dropprio) {
1294            if (!icv_eoi_split(env, cs)) {
1295                /* Priority drop and deactivate not split: deactivate irq now */
1296                icv_deactivate_irq(cs, idx);
1297            }
1298        }
1299    }
1300
1301    gicv3_cpuif_virt_update(cs);
1302}
1303
1304static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1305                           uint64_t value)
1306{
1307    /* End of Interrupt */
1308    GICv3CPUState *cs = icc_cs_from_env(env);
1309    int irq = value & 0xffffff;
1310    int grp;
1311    bool is_eoir0 = ri->crm == 8;
1312
1313    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
1314        icv_eoir_write(env, ri, value);
1315        return;
1316    }
1317
1318    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
1319                               gicv3_redist_affid(cs), value);
1320
1321    if (irq >= cs->gic->num_irq) {
1322        /* This handles two cases:
1323         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
1324         * to the GICC_EOIR, the GIC ignores that write.
1325         * 2. If software writes the number of a non-existent interrupt
1326         * this must be a subcase of "value written does not match the last
1327         * valid interrupt value read from the Interrupt Acknowledge
1328         * register" and so this is UNPREDICTABLE. We choose to ignore it.
1329         */
1330        return;
1331    }
1332
1333    grp = icc_highest_active_group(cs);
1334    switch (grp) {
1335    case GICV3_G0:
1336        if (!is_eoir0) {
1337            return;
1338        }
1339        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
1340            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
1341            return;
1342        }
1343        break;
1344    case GICV3_G1:
1345        if (is_eoir0) {
1346            return;
1347        }
1348        if (!arm_is_secure(env)) {
1349            return;
1350        }
1351        break;
1352    case GICV3_G1NS:
1353        if (is_eoir0) {
1354            return;
1355        }
1356        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
1357            return;
1358        }
1359        break;
1360    default:
1361        qemu_log_mask(LOG_GUEST_ERROR,
1362                      "%s: IRQ %d isn't active\n", __func__, irq);
1363        return;
1364    }
1365
1366    icc_drop_prio(cs, grp);
1367
1368    if (!icc_eoi_split(env, cs)) {
1369        /* Priority drop and deactivate not split: deactivate irq now */
1370        icc_deactivate_irq(cs, irq);
1371    }
1372}
1373
1374static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1375{
1376    GICv3CPUState *cs = icc_cs_from_env(env);
1377    uint64_t value;
1378
1379    if (icv_access(env, HCR_FMO)) {
1380        return icv_hppir_read(env, ri);
1381    }
1382
1383    value = icc_hppir0_value(cs, env);
1384    trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1385    return value;
1386}
1387
1388static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1389{
1390    GICv3CPUState *cs = icc_cs_from_env(env);
1391    uint64_t value;
1392
1393    if (icv_access(env, HCR_IMO)) {
1394        return icv_hppir_read(env, ri);
1395    }
1396
1397    value = icc_hppir1_value(cs, env);
1398    trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1399    return value;
1400}
1401
1402static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1403{
1404    GICv3CPUState *cs = icc_cs_from_env(env);
1405    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1406    bool satinc = false;
1407    uint64_t bpr;
1408
1409    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1410        return icv_bpr_read(env, ri);
1411    }
1412
1413    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1414        grp = GICV3_G1NS;
1415    }
1416
1417    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
1418        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1419        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
1420         * modify BPR0
1421         */
1422        grp = GICV3_G0;
1423    }
1424
1425    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
1426        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1427        /* reads return bpr0 + 1 sat to 7, writes ignored */
1428        grp = GICV3_G0;
1429        satinc = true;
1430    }
1431
1432    bpr = cs->icc_bpr[grp];
1433    if (satinc) {
1434        bpr++;
1435        bpr = MIN(bpr, 7);
1436    }
1437
1438    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
1439
1440    return bpr;
1441}
1442
1443static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1444                          uint64_t value)
1445{
1446    GICv3CPUState *cs = icc_cs_from_env(env);
1447    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
1448    uint64_t minval;
1449
1450    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1451        icv_bpr_write(env, ri, value);
1452        return;
1453    }
1454
1455    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
1456                              gicv3_redist_affid(cs), value);
1457
1458    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1459        grp = GICV3_G1NS;
1460    }
1461
1462    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
1463        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
1464        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
1465         * modify BPR0
1466         */
1467        grp = GICV3_G0;
1468    }
1469
1470    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
1471        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
1472        /* reads return bpr0 + 1 sat to 7, writes ignored */
1473        return;
1474    }
1475
1476    minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
1477    if (value < minval) {
1478        value = minval;
1479    }
1480
1481    cs->icc_bpr[grp] = value & 7;
1482    gicv3_cpuif_update(cs);
1483}
1484
1485static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1486{
1487    GICv3CPUState *cs = icc_cs_from_env(env);
1488    uint64_t value;
1489
1490    int regno = ri->opc2 & 3;
1491    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1492
1493    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1494        return icv_ap_read(env, ri);
1495    }
1496
1497    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1498        grp = GICV3_G1NS;
1499    }
1500
1501    value = cs->icc_apr[grp][regno];
1502
1503    trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1504    return value;
1505}
1506
1507static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1508                         uint64_t value)
1509{
1510    GICv3CPUState *cs = icc_cs_from_env(env);
1511
1512    int regno = ri->opc2 & 3;
1513    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1514
1515    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1516        icv_ap_write(env, ri, value);
1517        return;
1518    }
1519
1520    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1521
1522    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1523        grp = GICV3_G1NS;
1524    }
1525
1526    /* It's not possible to claim that a Non-secure interrupt is active
1527     * at a priority outside the Non-secure range (128..255), since this
1528     * would otherwise allow malicious NS code to block delivery of S interrupts
1529     * by writing a bad value to these registers.
1530     */
1531    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
1532        return;
1533    }
1534
1535    cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
1536    gicv3_cpuif_update(cs);
1537}
1538
1539static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1540                          uint64_t value)
1541{
1542    /* Deactivate interrupt */
1543    GICv3CPUState *cs = icc_cs_from_env(env);
1544    int irq = value & 0xffffff;
1545    bool irq_is_secure, single_sec_state, irq_is_grp0;
1546    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
1547
1548    if (icv_access(env, HCR_FMO | HCR_IMO)) {
1549        icv_dir_write(env, ri, value);
1550        return;
1551    }
1552
1553    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
1554
1555    if (irq >= cs->gic->num_irq) {
1556        /* Also catches special interrupt numbers and LPIs */
1557        return;
1558    }
1559
1560    if (!icc_eoi_split(env, cs)) {
1561        return;
1562    }
1563
1564    int grp = gicv3_irq_group(cs->gic, cs, irq);
1565
1566    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
1567    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
1568    irq_is_grp0 = grp == GICV3_G0;
1569
1570    /* Check whether we're allowed to deactivate this interrupt based
1571     * on its group and the current CPU state.
1572     * These checks are laid out to correspond to the spec's pseudocode.
1573     */
1574    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
1575    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
1576    /* No need to include !IsSecure in route_*_to_el2 as it's only
1577     * tested in cases where we know !IsSecure is true.
1578     */
1579    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1580    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
1581    route_irq_to_el2 = hcr_el2 & HCR_IMO;
1582
1583    switch (arm_current_el(env)) {
1584    case 3:
1585        break;
1586    case 2:
1587        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
1588            break;
1589        }
1590        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
1591            break;
1592        }
1593        return;
1594    case 1:
1595        if (!arm_is_secure_below_el3(env)) {
1596            if (single_sec_state && irq_is_grp0 &&
1597                !route_fiq_to_el3 && !route_fiq_to_el2) {
1598                break;
1599            }
1600            if (!irq_is_secure && !irq_is_grp0 &&
1601                !route_irq_to_el3 && !route_irq_to_el2) {
1602                break;
1603            }
1604        } else {
1605            if (irq_is_grp0 && !route_fiq_to_el3) {
1606                break;
1607            }
1608            if (!irq_is_grp0 &&
1609                (!irq_is_secure || !single_sec_state) &&
1610                !route_irq_to_el3) {
1611                break;
1612            }
1613        }
1614        return;
1615    default:
1616        g_assert_not_reached();
1617    }
1618
1619    icc_deactivate_irq(cs, irq);
1620}
1621
1622static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1623{
1624    GICv3CPUState *cs = icc_cs_from_env(env);
1625    int prio;
1626
1627    if (icv_access(env, HCR_FMO | HCR_IMO)) {
1628        return icv_rpr_read(env, ri);
1629    }
1630
1631    prio = icc_highest_active_prio(cs);
1632
1633    if (arm_feature(env, ARM_FEATURE_EL3) &&
1634        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
1635        /* NS GIC access and Group 0 is inaccessible to NS */
1636        if ((prio & 0x80) == 0) {
1637            /* NS mustn't see priorities in the Secure half of the range */
1638            prio = 0;
1639        } else if (prio != 0xff) {
1640            /* Non-idle priority: show the Non-secure view of it */
1641            prio = (prio << 1) & 0xff;
1642        }
1643    }
1644
1645    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
1646    return prio;
1647}
1648
1649static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
1650                             uint64_t value, int grp, bool ns)
1651{
1652    GICv3State *s = cs->gic;
1653
1654    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
1655    uint64_t aff = extract64(value, 48, 8) << 16 |
1656        extract64(value, 32, 8) << 8 |
1657        extract64(value, 16, 8);
1658    uint32_t targetlist = extract64(value, 0, 16);
1659    uint32_t irq = extract64(value, 24, 4);
1660    bool irm = extract64(value, 40, 1);
1661    int i;
1662
1663    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
1664        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
1665         * interrupts as Group 0 interrupts and must send Secure Group 0
1666         * interrupts to the target CPUs.
1667         */
1668        grp = GICV3_G0;
1669    }
1670
1671    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
1672                                 aff, targetlist);
1673
1674    for (i = 0; i < s->num_cpu; i++) {
1675        GICv3CPUState *ocs = &s->cpu[i];
1676
1677        if (irm) {
1678            /* IRM == 1 : route to all CPUs except self */
1679            if (cs == ocs) {
1680                continue;
1681            }
1682        } else {
1683            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
1684             * where the corresponding bit is set in targetlist
1685             */
1686            int aff0;
1687
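                /*
                 * GICR_TYPER[63:32] holds Aff3.Aff2.Aff1.Aff0; shifting right
                 * by 40 drops Aff0, leaving Aff3.Aff2.Aff1 to compare with aff.
                 */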
1688            if (ocs->gicr_typer >> 40 != aff) {
1689                continue;
1690            }
1691            aff0 = extract64(ocs->gicr_typer, 32, 8);
1692            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
1693                continue;
1694            }
1695        }
1696
1697        /* The redistributor will check against its own GICR_NSACR as needed */
1698        gicv3_redist_send_sgi(ocs, grp, irq, ns);
1699    }
1700}
1701
1702static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1703                           uint64_t value)
1704{
1705    /* Generate Secure Group 0 SGI. */
1706    GICv3CPUState *cs = icc_cs_from_env(env);
1707    bool ns = !arm_is_secure(env);
1708
1709    icc_generate_sgi(env, cs, value, GICV3_G0, ns);
1710}
1711
1712static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1713                           uint64_t value)
1714{
1715    /* Generate Group 1 SGI for the current Security state */
1716    GICv3CPUState *cs = icc_cs_from_env(env);
1717    int grp;
1718    bool ns = !arm_is_secure(env);
1719
1720    grp = ns ? GICV3_G1NS : GICV3_G1;
1721    icc_generate_sgi(env, cs, value, grp, ns);
1722}
1723
1724static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
1725                             uint64_t value)
1726{
1727    /* Generate Group 1 SGI for the Security state that is not
1728     * the current state
1729     */
1730    GICv3CPUState *cs = icc_cs_from_env(env);
1731    int grp;
1732    bool ns = !arm_is_secure(env);
1733
1734    grp = ns ? GICV3_G1 : GICV3_G1NS;
1735    icc_generate_sgi(env, cs, value, grp, ns);
1736}
1737
1738static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
1739{
1740    GICv3CPUState *cs = icc_cs_from_env(env);
1741    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
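        /* opc2 is 6 for ICC_IGRPEN0_EL1 and 7 for ICC_IGRPEN1_EL1 */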
1742    uint64_t value;
1743
1744    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1745        return icv_igrpen_read(env, ri);
1746    }
1747
1748    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1749        grp = GICV3_G1NS;
1750    }
1751
1752    value = cs->icc_igrpen[grp];
1753    trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
1754                                gicv3_redist_affid(cs), value);
1755    return value;
1756}
1757
1758static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
1759                             uint64_t value)
1760{
1761    GICv3CPUState *cs = icc_cs_from_env(env);
1762    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
1763
1764    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1765        icv_igrpen_write(env, ri, value);
1766        return;
1767    }
1768
1769    trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
1770                                 gicv3_redist_affid(cs), value);
1771
1772    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1773        grp = GICV3_G1NS;
1774    }
1775
1776    cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
1777    gicv3_cpuif_update(cs);
1778}
1779
1780static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1781{
1782    GICv3CPUState *cs = icc_cs_from_env(env);
1783    uint64_t value;
1784
1785    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1786    value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
1787    trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
1788    return value;
1789}
1790
1791static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1792                                  uint64_t value)
1793{
1794    GICv3CPUState *cs = icc_cs_from_env(env);
1795
1796    trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
1797
1798    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
1799    cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
1800    cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
1801    gicv3_cpuif_update(cs);
1802}
1803
1804static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1805{
1806    GICv3CPUState *cs = icc_cs_from_env(env);
1807    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1808    uint64_t value;
1809
1810    if (icv_access(env, HCR_FMO | HCR_IMO)) {
1811        return icv_ctlr_read(env, ri);
1812    }
1813
1814    value = cs->icc_ctlr_el1[bank];
1815    trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
1816    return value;
1817}
1818
1819static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1820                               uint64_t value)
1821{
1822    GICv3CPUState *cs = icc_cs_from_env(env);
1823    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
1824    uint64_t mask;
1825
1826    if (icv_access(env, HCR_FMO | HCR_IMO)) {
1827        icv_ctlr_write(env, ri, value);
1828        return;
1829    }
1830
1831    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
1832
1833    /* Only CBPR and EOIMODE can be RW;
1834     * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
1835     * the associated priority-based routing of them);
1836     * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
1837     */
1838    if (arm_feature(env, ARM_FEATURE_EL3) &&
1839        ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
1840        mask = ICC_CTLR_EL1_EOIMODE;
1841    } else {
1842        mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
1843    }
1844
1845    cs->icc_ctlr_el1[bank] &= ~mask;
1846    cs->icc_ctlr_el1[bank] |= (value & mask);
1847    gicv3_cpuif_update(cs);
1848}
1849
1850
1851static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
1852{
1853    GICv3CPUState *cs = icc_cs_from_env(env);
1854    uint64_t value;
1855
1856    value = cs->icc_ctlr_el3;
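        /*
         * The EL1S and EL1NS alias bits are not stored in icc_ctlr_el3;
         * reconstruct them from the two banked copies of ICC_CTLR_EL1.
         */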
1857    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
1858        value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
1859    }
1860    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
1861        value |= ICC_CTLR_EL3_CBPR_EL1NS;
1862    }
1863    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
1864        value |= ICC_CTLR_EL3_EOIMODE_EL1S;
1865    }
1866    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
1867        value |= ICC_CTLR_EL3_CBPR_EL1S;
1868    }
1869
1870    trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
1871    return value;
1872}
1873
1874static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
1875                               uint64_t value)
1876{
1877    GICv3CPUState *cs = icc_cs_from_env(env);
1878    uint64_t mask;
1879
1880    trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
1881
1882    /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
1883    cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1884    if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
1885        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
1886    }
1887    if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
1888        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
1889    }
1890
1891    cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
1892    if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
1893        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
1894    }
1895    if (value & ICC_CTLR_EL3_CBPR_EL1S) {
1896        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
1897    }
1898
1899    /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
1900    mask = ICC_CTLR_EL3_EOIMODE_EL3;
1901
1902    cs->icc_ctlr_el3 &= ~mask;
1903    cs->icc_ctlr_el3 |= (value & mask);
1904    gicv3_cpuif_update(cs);
1905}
1906
1907static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
1908                                          const ARMCPRegInfo *ri, bool isread)
1909{
1910    CPAccessResult r = CP_ACCESS_OK;
1911    GICv3CPUState *cs = icc_cs_from_env(env);
1912    int el = arm_current_el(env);
1913
1914    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
1915        el == 1 && !arm_is_secure_below_el3(env)) {
1916        /* Takes priority over a possible EL3 trap */
1917        return CP_ACCESS_TRAP_EL2;
1918    }
1919
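        /*
         * When SCR_EL3 routes both IRQ and FIQ to EL3, accesses from EL1 and
         * EL2 trap to EL3, except at EL1 when EL2 is already intercepting
         * them via HCR_EL2.IMO/FMO.
         */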
1920    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
1921        switch (el) {
1922        case 1:
1923            /* Note that arm_hcr_el2_eff takes secure state into account.  */
1924            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
1925                r = CP_ACCESS_TRAP_EL3;
1926            }
1927            break;
1928        case 2:
1929            r = CP_ACCESS_TRAP_EL3;
1930            break;
1931        case 3:
1932            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1933                r = CP_ACCESS_TRAP_EL3;
1934            }
1935            break;
1936        default:
1937            g_assert_not_reached();
1938        }
1939    }
1940
1941    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
1942        r = CP_ACCESS_TRAP;
1943    }
1944    return r;
1945}
1946
1947static CPAccessResult gicv3_dir_access(CPUARMState *env,
1948                                       const ARMCPRegInfo *ri, bool isread)
1949{
1950    GICv3CPUState *cs = icc_cs_from_env(env);
1951
1952    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
1953        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
1954        /* Takes priority over a possible EL3 trap */
1955        return CP_ACCESS_TRAP_EL2;
1956    }
1957
1958    return gicv3_irqfiq_access(env, ri, isread);
1959}
1960
1961static CPAccessResult gicv3_sgi_access(CPUARMState *env,
1962                                       const ARMCPRegInfo *ri, bool isread)
1963{
1964    if (arm_current_el(env) == 1 &&
1965        (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
1966        /* Takes priority over a possible EL3 trap */
1967        return CP_ACCESS_TRAP_EL2;
1968    }
1969
1970    return gicv3_irqfiq_access(env, ri, isread);
1971}
1972
1973static CPAccessResult gicv3_fiq_access(CPUARMState *env,
1974                                       const ARMCPRegInfo *ri, bool isread)
1975{
1976    CPAccessResult r = CP_ACCESS_OK;
1977    GICv3CPUState *cs = icc_cs_from_env(env);
1978    int el = arm_current_el(env);
1979
1980    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
1981        el == 1 && !arm_is_secure_below_el3(env)) {
1982        /* Takes priority over a possible EL3 trap */
1983        return CP_ACCESS_TRAP_EL2;
1984    }
1985
1986    if (env->cp15.scr_el3 & SCR_FIQ) {
1987        switch (el) {
1988        case 1:
1989            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
1990                r = CP_ACCESS_TRAP_EL3;
1991            }
1992            break;
1993        case 2:
1994            r = CP_ACCESS_TRAP_EL3;
1995            break;
1996        case 3:
1997            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
1998                r = CP_ACCESS_TRAP_EL3;
1999            }
2000            break;
2001        default:
2002            g_assert_not_reached();
2003        }
2004    }
2005
2006    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
2007        r = CP_ACCESS_TRAP;
2008    }
2009    return r;
2010}
2011
2012static CPAccessResult gicv3_irq_access(CPUARMState *env,
2013                                       const ARMCPRegInfo *ri, bool isread)
2014{
2015    CPAccessResult r = CP_ACCESS_OK;
2016    GICv3CPUState *cs = icc_cs_from_env(env);
2017    int el = arm_current_el(env);
2018
2019    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
2020        el == 1 && !arm_is_secure_below_el3(env)) {
2021        /* Takes priority over a possible EL3 trap */
2022        return CP_ACCESS_TRAP_EL2;
2023    }
2024
2025    if (env->cp15.scr_el3 & SCR_IRQ) {
2026        switch (el) {
2027        case 1:
2028            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
2029                r = CP_ACCESS_TRAP_EL3;
2030            }
2031            break;
2032        case 2:
2033            r = CP_ACCESS_TRAP_EL3;
2034            break;
2035        case 3:
2036            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
2037                r = CP_ACCESS_TRAP_EL3;
2038            }
2039            break;
2040        default:
2041            g_assert_not_reached();
2042        }
2043    }
2044
2045    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
2046        r = CP_ACCESS_TRAP;
2047    }
2048    return r;
2049}
2050
2051static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2052{
2053    GICv3CPUState *cs = icc_cs_from_env(env);
2054
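        /*
         * The reset value advertises A3V, 24-bit INTIDs (IDbits == 1) and
         * eight priority bits (the PRIbits fields hold the count minus one).
         */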
2055    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
2056        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
2057        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
2058    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
2059        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
2060        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
2061    cs->icc_pmr_el1 = 0;
2062    cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
2063    cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
2064    cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
2065    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
2066    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
2067    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
2068        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
2069        (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
2070
2071    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
2072    cs->ich_hcr_el2 = 0;
2073    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
2074    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
2075        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
2076        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
2077}
2078
2079static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
2080    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
2081      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
2082      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2083      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
2084      .readfn = icc_pmr_read,
2085      .writefn = icc_pmr_write,
2086      /* We hang the whole cpu interface reset routine off here
2087       * rather than parcelling it out into one little function
2088       * per register
2089       */
2090      .resetfn = icc_reset,
2091    },
2092    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
2093      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
2094      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2095      .access = PL1_R, .accessfn = gicv3_fiq_access,
2096      .readfn = icc_iar0_read,
2097    },
2098    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
2099      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
2100      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2101      .access = PL1_W, .accessfn = gicv3_fiq_access,
2102      .writefn = icc_eoir_write,
2103    },
2104    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
2105      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
2106      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2107      .access = PL1_R, .accessfn = gicv3_fiq_access,
2108      .readfn = icc_hppir0_read,
2109    },
2110    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
2111      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
2112      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2113      .access = PL1_RW, .accessfn = gicv3_fiq_access,
2114      .readfn = icc_bpr_read,
2115      .writefn = icc_bpr_write,
2116    },
2117    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
2118      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
2119      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2120      .access = PL1_RW, .accessfn = gicv3_fiq_access,
2121      .readfn = icc_ap_read,
2122      .writefn = icc_ap_write,
2123    },
2124    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
2125      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
2126      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2127      .access = PL1_RW, .accessfn = gicv3_fiq_access,
2128      .readfn = icc_ap_read,
2129      .writefn = icc_ap_write,
2130    },
2131    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
2132      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
2133      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2134      .access = PL1_RW, .accessfn = gicv3_fiq_access,
2135      .readfn = icc_ap_read,
2136      .writefn = icc_ap_write,
2137    },
2138    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
2139      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
2140      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2141      .access = PL1_RW, .accessfn = gicv3_fiq_access,
2142      .readfn = icc_ap_read,
2143      .writefn = icc_ap_write,
2144    },
2145    /* All the ICC_AP1R*_EL1 registers are banked */
2146    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
2147      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
2148      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2149      .access = PL1_RW, .accessfn = gicv3_irq_access,
2150      .readfn = icc_ap_read,
2151      .writefn = icc_ap_write,
2152    },
2153    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
2154      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
2155      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2156      .access = PL1_RW, .accessfn = gicv3_irq_access,
2157      .readfn = icc_ap_read,
2158      .writefn = icc_ap_write,
2159    },
2160    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
2161      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
2162      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2163      .access = PL1_RW, .accessfn = gicv3_irq_access,
2164      .readfn = icc_ap_read,
2165      .writefn = icc_ap_write,
2166    },
2167    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
2168      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
2169      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2170      .access = PL1_RW, .accessfn = gicv3_irq_access,
2171      .readfn = icc_ap_read,
2172      .writefn = icc_ap_write,
2173    },
2174    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
2175      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
2176      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2177      .access = PL1_W, .accessfn = gicv3_dir_access,
2178      .writefn = icc_dir_write,
2179    },
2180    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
2181      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
2182      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2183      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
2184      .readfn = icc_rpr_read,
2185    },
2186    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
2187      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
2188      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2189      .access = PL1_W, .accessfn = gicv3_sgi_access,
2190      .writefn = icc_sgi1r_write,
2191    },
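        /* The AArch32 forms of the SGI registers below are 64-bit
         * (MCRR/MRRC) cp15 accesses.
         */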
2192    { .name = "ICC_SGI1R",
2193      .cp = 15, .opc1 = 0, .crm = 12,
2194      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2195      .access = PL1_W, .accessfn = gicv3_sgi_access,
2196      .writefn = icc_sgi1r_write,
2197    },
2198    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
2199      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
2200      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2201      .access = PL1_W, .accessfn = gicv3_sgi_access,
2202      .writefn = icc_asgi1r_write,
2203    },
2204    { .name = "ICC_ASGI1R",
2205      .cp = 15, .opc1 = 1, .crm = 12,
2206      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2207      .access = PL1_W, .accessfn = gicv3_sgi_access,
2208      .writefn = icc_asgi1r_write,
2209    },
2210    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
2211      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
2212      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2213      .access = PL1_W, .accessfn = gicv3_sgi_access,
2214      .writefn = icc_sgi0r_write,
2215    },
2216    { .name = "ICC_SGI0R",
2217      .cp = 15, .opc1 = 2, .crm = 12,
2218      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
2219      .access = PL1_W, .accessfn = gicv3_sgi_access,
2220      .writefn = icc_sgi0r_write,
2221    },
2222    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
2223      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
2224      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2225      .access = PL1_R, .accessfn = gicv3_irq_access,
2226      .readfn = icc_iar1_read,
2227    },
2228    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
2229      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
2230      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2231      .access = PL1_W, .accessfn = gicv3_irq_access,
2232      .writefn = icc_eoir_write,
2233    },
2234    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
2235      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
2236      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2237      .access = PL1_R, .accessfn = gicv3_irq_access,
2238      .readfn = icc_hppir1_read,
2239    },
2240    /* This register is banked */
2241    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
2242      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
2243      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2244      .access = PL1_RW, .accessfn = gicv3_irq_access,
2245      .readfn = icc_bpr_read,
2246      .writefn = icc_bpr_write,
2247    },
2248    /* This register is banked */
2249    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
2250      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
2251      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2252      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
2253      .readfn = icc_ctlr_el1_read,
2254      .writefn = icc_ctlr_el1_write,
2255    },
2256    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
2257      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
2258      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2259      .access = PL1_RW,
2260      /* We don't support IRQ/FIQ bypass and system registers are
2261       * always enabled, so all our bits are RAZ/WI or RAO/WI.
2262       * This register is banked but since it's constant we don't
2263       * need to do anything special.
2264       */
2265      .resetvalue = 0x7,
2266    },
2267    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
2268      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
2269      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2270      .access = PL1_RW, .accessfn = gicv3_fiq_access,
2271      .readfn = icc_igrpen_read,
2272      .writefn = icc_igrpen_write,
2273    },
2274    /* This register is banked */
2275    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
2276      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
2277      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2278      .access = PL1_RW, .accessfn = gicv3_irq_access,
2279      .readfn = icc_igrpen_read,
2280      .writefn = icc_igrpen_write,
2281    },
2282    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
2283      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
2284      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2285      .access = PL2_RW,
2286      /* We don't support IRQ/FIQ bypass and system registers are
2287       * always enabled, so all our bits are RAZ/WI or RAO/WI.
2288       */
2289      .resetvalue = 0xf,
2290    },
2291    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
2292      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
2293      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2294      .access = PL3_RW,
2295      .readfn = icc_ctlr_el3_read,
2296      .writefn = icc_ctlr_el3_write,
2297    },
2298    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
2299      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
2300      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
2301      .access = PL3_RW,
2302      /* We don't support IRQ/FIQ bypass and system registers are
2303       * always enabled, so all our bits are RAZ/WI or RAO/WI.
2304       */
2305      .resetvalue = 0xf,
2306    },
2307    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
2308      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
2309      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2310      .access = PL3_RW,
2311      .readfn = icc_igrpen1_el3_read,
2312      .writefn = icc_igrpen1_el3_write,
2313    },
2314    REGINFO_SENTINEL
2315};
2316
2317static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2318{
2319    GICv3CPUState *cs = icc_cs_from_env(env);
2320    int regno = ri->opc2 & 3;
2321    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
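        /* crm 8 encodes ICH_AP0Rn_EL2 (Group 0), crm 9 ICH_AP1Rn_EL2 */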
2322    uint64_t value;
2323
2324    value = cs->ich_apr[grp][regno];
2325    trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2326    return value;
2327}
2328
2329static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2330                         uint64_t value)
2331{
2332    GICv3CPUState *cs = icc_cs_from_env(env);
2333    int regno = ri->opc2 & 3;
2334    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2335
2336    trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2337
2338    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
2339    gicv3_cpuif_virt_update(cs);
2340}
2341
2342static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2343{
2344    GICv3CPUState *cs = icc_cs_from_env(env);
2345    uint64_t value = cs->ich_hcr_el2;
2346
2347    trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
2348    return value;
2349}
2350
2351static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2352                          uint64_t value)
2353{
2354    GICv3CPUState *cs = icc_cs_from_env(env);
2355
2356    trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
2357
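        /* Keep only the bits we implement; anything else is treated as RES0 */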
2358    value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
2359        ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
2360        ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
2361        ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
2362        ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
2363
2364    cs->ich_hcr_el2 = value;
2365    gicv3_cpuif_virt_update(cs);
2366}
2367
2368static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2369{
2370    GICv3CPUState *cs = icc_cs_from_env(env);
2371    uint64_t value = cs->ich_vmcr_el2;
2372
2373    trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
2374    return value;
2375}
2376
2377static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2378                         uint64_t value)
2379{
2380    GICv3CPUState *cs = icc_cs_from_env(env);
2381
2382    trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
2383
2384    value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
2385        ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
2386        ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
2387    value |= ICH_VMCR_EL2_VFIQEN;
2388
2389    cs->ich_vmcr_el2 = value;
2390    /* Enforce "writing BPRs to less than minimum sets them to the minimum"
2391     * by reading and writing back the fields.
2392     */
2393    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
2394    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
2395
2396    gicv3_cpuif_virt_update(cs);
2397}
2398
2399static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2400{
2401    GICv3CPUState *cs = icc_cs_from_env(env);
2402    int regno = ri->opc2 | ((ri->crm & 1) << 3);
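        /*
         * LRs 0..7 live at crm 12 (LRC: crm 14) and LRs 8..15 at crm 13
         * (LRC: crm 15), with opc2 giving the index within each group of 8.
         */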
2403    uint64_t value;
2404
2405    /* This read function handles all of:
2406     * 64-bit reads of the whole LR
2407     * 32-bit reads of the low half of the LR
2408     * 32-bit reads of the high half of the LR
2409     */
2410    if (ri->state == ARM_CP_STATE_AA32) {
2411        if (ri->crm >= 14) {
2412            value = extract64(cs->ich_lr_el2[regno], 32, 32);
2413            trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
2414        } else {
2415            value = extract64(cs->ich_lr_el2[regno], 0, 32);
2416            trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
2417        }
2418    } else {
2419        value = cs->ich_lr_el2[regno];
2420        trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
2421    }
2422
2423    return value;
2424}
2425
2426static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2427                         uint64_t value)
2428{
2429    GICv3CPUState *cs = icc_cs_from_env(env);
2430    int regno = ri->opc2 | ((ri->crm & 1) << 3);
2431
2432    /* This write function handles all of:
2433     * 64-bit writes to the whole LR
2434     * 32-bit writes to the low half of the LR
2435     * 32-bit writes to the high half of the LR
2436     */
2437    if (ri->state == ARM_CP_STATE_AA32) {
2438        if (ri->crm >= 14) {
2439            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
2440            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
2441        } else {
2442            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
2443            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
2444        }
2445    } else {
2446        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
2447    }
2448
2449    /* Enforce RES0 bits in priority field */
2450    if (cs->vpribits < 8) {
2451        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
2452                          8 - cs->vpribits, 0);
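            /* e.g. with vpribits == 5 the low three priority bits are forced to zero */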
2453    }
2454
2455    cs->ich_lr_el2[regno] = value;
2456    gicv3_cpuif_virt_update(cs);
2457}
2458
2459static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2460{
2461    GICv3CPUState *cs = icc_cs_from_env(env);
2462    uint64_t value;
2463
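        /*
         * The count fields use a "minus one" encoding: e.g. four list
         * registers are reported as 3 in ListRegs, five preemption bits as 4.
         */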
2464    value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
2465        | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
2466        | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
2467        | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
2468        | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
2469
2470    trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
2471    return value;
2472}
2473
2474static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2475{
2476    GICv3CPUState *cs = icc_cs_from_env(env);
2477    uint64_t value = maintenance_interrupt_state(cs);
2478
2479    trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
2480    return value;
2481}
2482
2483static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2484{
2485    GICv3CPUState *cs = icc_cs_from_env(env);
2486    uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
2487
2488    trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
2489    return value;
2490}
2491
2492static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2493{
2494    GICv3CPUState *cs = icc_cs_from_env(env);
2495    uint64_t value = 0;
2496    int i;
2497
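        /*
         * A list register is usable (its ELRSR bit is set) when its state is
         * Invalid and it will not still generate an EOI maintenance interrupt,
         * i.e. it is a hardware interrupt or a software one with EOI clear.
         */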
2498    for (i = 0; i < cs->num_list_regs; i++) {
2499        uint64_t lr = cs->ich_lr_el2[i];
2500
2501        if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
2502            ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
2503            value |= (1 << i);
2504        }
2505    }
2506
2507    trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
2508    return value;
2509}
2510
2511static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
2512    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
2513      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
2514      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2515      .access = PL2_RW,
2516      .readfn = ich_ap_read,
2517      .writefn = ich_ap_write,
2518    },
2519    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
2520      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
2521      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2522      .access = PL2_RW,
2523      .readfn = ich_ap_read,
2524      .writefn = ich_ap_write,
2525    },
2526    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
2527      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
2528      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2529      .access = PL2_RW,
2530      .readfn = ich_hcr_read,
2531      .writefn = ich_hcr_write,
2532    },
2533    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
2534      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
2535      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2536      .access = PL2_R,
2537      .readfn = ich_vtr_read,
2538    },
2539    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
2540      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
2541      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2542      .access = PL2_R,
2543      .readfn = ich_misr_read,
2544    },
2545    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
2546      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
2547      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2548      .access = PL2_R,
2549      .readfn = ich_eisr_read,
2550    },
2551    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
2552      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
2553      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2554      .access = PL2_R,
2555      .readfn = ich_elrsr_read,
2556    },
2557    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
2558      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
2559      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2560      .access = PL2_RW,
2561      .readfn = ich_vmcr_read,
2562      .writefn = ich_vmcr_write,
2563    },
2564    REGINFO_SENTINEL
2565};
2566
2567static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
2568    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
2569      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
2570      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2571      .access = PL2_RW,
2572      .readfn = ich_ap_read,
2573      .writefn = ich_ap_write,
2574    },
2575    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
2576      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
2577      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2578      .access = PL2_RW,
2579      .readfn = ich_ap_read,
2580      .writefn = ich_ap_write,
2581    },
2582    REGINFO_SENTINEL
2583};
2584
2585static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
2586    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
2587      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
2588      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2589      .access = PL2_RW,
2590      .readfn = ich_ap_read,
2591      .writefn = ich_ap_write,
2592    },
2593    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
2594      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
2595      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2596      .access = PL2_RW,
2597      .readfn = ich_ap_read,
2598      .writefn = ich_ap_write,
2599    },
2600    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
2601      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
2602      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2603      .access = PL2_RW,
2604      .readfn = ich_ap_read,
2605      .writefn = ich_ap_write,
2606    },
2607    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
2608      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
2609      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2610      .access = PL2_RW,
2611      .readfn = ich_ap_read,
2612      .writefn = ich_ap_write,
2613    },
2614    REGINFO_SENTINEL
2615};
2616
2617static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
2618{
2619    GICv3CPUState *cs = opaque;
2620
2621    gicv3_cpuif_update(cs);
2622}
2623
2624void gicv3_init_cpuif(GICv3State *s)
2625{
2626    /* Called from the GICv3 realize function; register our system
2627     * registers with the CPU
2628     */
2629    int i;
2630
2631    for (i = 0; i < s->num_cpu; i++) {
2632        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
2633        GICv3CPUState *cs = &s->cpu[i];
2634
2635        /* Note that we can't just use the GICv3CPUState as an opaque pointer
2636         * in define_arm_cp_regs_with_opaque(), because when we're called back
2637         * it might be with code translated by CPU 0 but run by CPU 1, in
2638         * which case we'd get the wrong value.
2639         * So instead we define the regs with no ri->opaque info, and
2640         * get back to the GICv3CPUState from the CPUARMState.
2641         */
2642        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
2643        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
2644            && cpu->gic_num_lrs) {
2645            int j;
2646
2647            cs->num_list_regs = cpu->gic_num_lrs;
2648            cs->vpribits = cpu->gic_vpribits;
2649            cs->vprebits = cpu->gic_vprebits;
2650
2651            /* Check against architectural constraints: getting these
2652             * wrong would be a bug in the CPU code defining these,
2653             * and the implementation relies on them holding.
2654             */
2655            g_assert(cs->vprebits <= cs->vpribits);
2656            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
2657            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
2658
2659            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
2660
2661            for (j = 0; j < cs->num_list_regs; j++) {
2662                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
2663                 * are split into two cp15 regs, LR (the low part, with the
2664                 * same encoding as the AArch64 LR) and LRC (the high part).
2665                 */
2666                ARMCPRegInfo lr_regset[] = {
2667                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
2668                      .opc0 = 3, .opc1 = 4, .crn = 12,
2669                      .crm = 12 + (j >> 3), .opc2 = j & 7,
2670                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2671                      .access = PL2_RW,
2672                      .readfn = ich_lr_read,
2673                      .writefn = ich_lr_write,
2674                    },
2675                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
2676                      .cp = 15, .opc1 = 4, .crn = 12,
2677                      .crm = 14 + (j >> 3), .opc2 = j & 7,
2678                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
2679                      .access = PL2_RW,
2680                      .readfn = ich_lr_read,
2681                      .writefn = ich_lr_write,
2682                    },
2683                    REGINFO_SENTINEL
2684                };
2685                define_arm_cp_regs(cpu, lr_regset);
2686            }
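                /*
                 * Each ICH_APnRm register tracks 32 preemption levels, so the
                 * m == 1 registers only exist for 6 or more preemption bits
                 * and the m == 2,3 registers only for 7.
                 */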
2687            if (cs->vprebits >= 6) {
2688                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
2689            }
2690            if (cs->vprebits == 7) {
2691                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
2692            }
2693        }
2694        arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
2695    }
2696}
2697