qemu/hw/intc/arm_gicv3_redist.c
/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_IGROUPR0. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

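/*
 * For illustration: with GICD_CTLR.DS == 0 and GICR_IGROUPR0 == 0x0000ff00,
 * a non-secure access to a bitmap register such as GICR_ISENABLER0 is
 * masked to 0x0000ff00, i.e. it can only observe or modify interrupts
 * 8..15 (the ones configured as non-secure Group 1), while a secure
 * access gets the full 0xffffffff mask.
 */
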
static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
{
    /*
     * Return true if a vCPU is resident, which is defined by
     * whether the GICR_VPENDBASER register is marked VALID and
     * has the right virtual pending table address.
     */
    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        return false;
    }
    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
}

/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}

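/*
 * For illustration of the priority transform above: when GICD_CTLR.DS is
 * clear (@ds false), a configuration-table priority byte of 0x48 becomes
 * (0x48 >> 1) | 0x80 == 0xa4, i.e. the priority is compressed into the
 * upper half (0x80..0xff) of the priority space, matching the non-secure
 * view of priorities implemented in gicr_write_ipriorityr() below.
 */
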
/**
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}

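/*
 * Sizing example for the scan above: if GICR_PROPBASER.IDBits is 15 then
 * @ptsizebits is 15, pendt_size is 1 << 16 == 65536 interrupt IDs, and
 * the pending table is 65536 / 8 == 8192 bytes. Since LPI INTIDs start
 * at GICV3_LPI_INTID_START (8192), the loop skips the first 1024 bytes
 * and scans bytes 1024..8191, i.e. INTIDs 8192..65535, one byte (8 LPIs)
 * at a time, looking up the configuration table only for set bits.
 */
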
/**
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}

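/*
 * Addressing example: for @irq 8193 the pending bit lives at byte
 * @ptbase + 8193 / 8 == @ptbase + 1024, bit position 8193 % 8 == 1.
 * Setting an already-set bit (or clearing an already-clear one) returns
 * false, which lets callers skip the highest-priority recalculation.
 */
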
static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

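/*
 * NS-view round trip example for the pair of helpers above: a non-secure
 * write of priority 0x80 to an NS Group 1 interrupt is stored as
 * 0x80 | (0x80 >> 1) == 0xc0; a later non-secure read returns
 * (0xc0 << 1) & 0xff == 0x80 again, while a secure read sees the full
 * stored value 0xc0. This implements the architected non-secure view,
 * which can only express the lower half of the priority range.
 */
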
static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}

static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}

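/*
 * Summary of the VALID transitions handled above:
 *   VALID 1 -> 1 with other fields changed: UNPREDICTABLE; log and ignore
 *   VALID 0 -> 0: plain register update, no side effects
 *   VALID 0 -> 1: vPE scheduled; PendingLast reads as 1 (RES1)
 *   VALID 1 -> 0: vPE descheduled; PendingLast set iff a pending enabled
 *                 vLPI remains (cs->hppvlpi.prio != 0xff)
 * The last two cases fall through to gicv3_redist_update_vlpi(), which
 * rescans the virtual pending table and re-evaluates vIRQ/vFIQ.
 */
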
static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

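/*
 * ICFGR encoding example: each GICR_ICFGRn field is 2 bits per interrupt,
 * with bit 1 of the field indicating edge-triggered, while cs->edge_trigger
 * keeps only 1 bit per interrupt. If PPI 16 alone is edge-triggered,
 * cs->edge_trigger is 0x00010000; the GICR_ICFGR1 read path above extracts
 * the top half (0x0001), half_shuffle32() spreads it into the even bit
 * positions (0x00000001), and the << 1 moves it into the odd bits, so the
 * guest reads 0x00000002, i.e. field 0 == 0b10 (edge). The GICR_ICFGR1
 * write path in gicr_writel() below applies the inverse,
 * half_unshuffle32(value >> 1), to compress the odd bits back into the
 * one-bit-per-irq bitmap.
 */
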
static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in the pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

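/*
 * Decomposition example for the cpuidx/offset arithmetic above, assuming
 * the GICv3 case where gicv3_redist_size() is 0x20000 (the two 64K frames,
 * RD_base and SGI_base): an access at offset 0x50000 into a region whose
 * region->cpuidx is 4 targets CPU 4 + 0x50000 / 0x20000 == 6, at per-CPU
 * offset 0x50000 % 0x20000 == 0x10000, i.e. in CPU 6's SGI_base frame.
 * For a GICv4 redistributor the per-CPU stride doubles to four frames.
 */
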
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}

void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * Scan the LPI pending table and, for each pending LPI, read the
     * corresponding entry in the LPI configuration table to extract
     * its priority and enabled state. Any pending enabled LPI whose
     * priority is higher (numerically lower) than the currently
     * recorded one becomes the new highest priority pending LPI.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}

void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * Update the pending bit in the LPI pending table for
     * the irq being activated or deactivated.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* no change in the value of the pending bit, return */
        return;
    }

    /*
     * Check if this LPI is better than the current hpplpi; if so we can
     * just set hpplpi.prio and .irq without doing a full rescan.
     */
    if (level) {
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}

void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}

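/*
 * Range-check example: with GICR_PROPBASER.IDBits == 15 (and assuming the
 * implementation maximum GICD_TYPER_IDBITS does not cap it further), the
 * accepted LPI INTIDs are GICV3_LPI_INTID_START (8192) through
 * (1 << 16) - 1 == 65535; anything outside that window, or any LPI
 * arriving while GICR_CTLR.Enable_LPIs is clear, is silently discarded.
 */
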
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}

void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}

void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}

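/*
 * Merge example for the byte-at-a-time loop above: if a source pending
 * byte is 0x28 and the corresponding destination byte is 0x81, the
 * destination becomes 0x81 | 0x28 == 0xa9 and the source byte is zeroed;
 * LPIs already pending on the destination are preserved, as required.
 */
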
void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * Change the pending state of the specified vLPI.
     * Unlike gicv3_redist_process_vlpi(), we know here that the
     * vCPU is definitely resident on this redistributor, and that
     * the irq is in range.
     */
    uint64_t vptbase, ctbase;

    vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;

    if (set_pending_table_bit(cs, vptbase, irq, level)) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }
}

void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    bool bit_changed;
    bool resident = vcpu_resident(cs, vptaddr);
    uint64_t ctbase;

    if (resident) {
        uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
        if (irq >= (1ULL << (idbits + 1))) {
            return;
        }
    }

    bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
    if (resident && bit_changed) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }

    if (!resident && level && doorbell != INTID_SPURIOUS &&
        (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        /* vCPU is not currently resident: ring the doorbell */
        gicv3_redist_process_lpi(cs, doorbell, 1);
    }
}

void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * Move the specified vLPI's pending state from the source redistributor
     * to the destination.
     */
    if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
        /*
         * Update src's cached highest-priority pending vLPI if we just made
         * it not-pending
         */
        gicv3_redist_update_vlpi(src);
    }
    /*
     * Mark the vLPI pending on the destination (ringing the doorbell
     * if the vCPU isn't resident)
     */
    gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
}

void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    if (!vcpu_resident(cs, vptaddr)) {
        /* We don't have anything cached if the vCPU isn't resident */
        return;
    }

    /* Otherwise, our only cached information is the HPPVLPI info */
    gicv3_redist_update_vlpi(cs);
}

void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The only cached information for vLPIs we have is the HPPVLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_vinvall(cs, vptaddr);
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}

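/*
 * For illustration: a 0->1 transition on PPI 16 with cs->edge_trigger
 * bit 16 set latches bit 16 of gicr_ipendr0, so the interrupt stays
 * pending after the line drops. If PPI 16 is level-triggered instead,
 * nothing is latched here; the GICR_ISPENDR0 read path ORs cs->level
 * into the pending value, so pending tracks the input line directly.
 */
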
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}
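
/*
 * NSACR example for the check above: with GICD_CTLR.DS == 0, a
 * non-secure-originated SGI 5 targeting an interrupt configured as
 * Secure Group 0 needs GICR_NSACR bits [11:10] (the 2-bit field read by
 * gicr_ns_access()) to be at least 1 to be accepted, and at least 2 if
 * the target is configured as Secure Group 1; otherwise the SGI is
 * silently dropped.
 */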