qemu/hw/intc/arm_gicv3_redist.c
/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
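    /* Helper routine to implement reading a "set/clear-bitmap" register */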
    reg &= mask_group(cs, attrs);
    return reg;
}

static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
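        /* A Non-secure write can only set a priority in the lower-priority
         * half of the range: the value is halved and its top bit is forced
         * to 1, the inverse of the shift applied on Non-secure reads.
         */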
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
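    /* Only the priority registers are byte-accessible in this model;
     * any other byte-wide access returns MEMTX_ERROR, which the caller
     * converts into a RAZ/WI plus a guest-error log message.
     */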
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
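        /* RAZ/WI for Non-secure accesses when the two Security states
         * are enabled.
         */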
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

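        /* Assemble the 32-bit result a byte at a time, highest IRQ first,
         * so that each priority field gets the correct security-state view.
         */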
        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x1f:
        *data = gicv3_idreg(offset - GICR_IDREGS);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't
         * implement LPIs) so Enable_LPIs is RES0. So there are no writable
         * bits for us.
         */
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writeable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached
         * the quiescent state.
         * Setting ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

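        /* Split the 32-bit write into four byte-wide priority writes,
         * lowest IRQ first.
         */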
        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x1f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
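    /* Each redistributor therefore occupies 0x20000 bytes: a 64K
     * RD_base frame followed by a 64K SGI_base frame.
     */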
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}

void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }
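    /* For level-triggered interrupts no pending latch is set here: their
     * pending state simply follows cs->level (see the GICR_ISPENDR0 read).
     */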

    gicv3_redist_update(cs);
}

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

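        /* An NS_access field value of at least 1 is required to generate
         * a Group 0 SGI, and at least 2 for a Secure Group 1 SGI.
         */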
        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}