qemu/hw/intc/pnv_xive2.c
/*
 * QEMU PowerPC XIVE2 interrupt controller model  (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                  16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]  =  { "IC",   1 /* ? */         , 16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1 /* ? */         , 16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * FIFOs of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                   VC_QUEUE_COUNT },
};
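
/*
 * A VSD (Virtual Structure Descriptor) is a 64-bit word combining
 * the table mode (forward or exclusive), the table address and its
 * encoded size (VSD_TSIZE, a shift amount above 4K). As an
 * illustrative example, assuming sizeof(Xive2Eas) == 8: a direct
 * 64K EAT holds 64K / 8 = 8192 interrupt entries.
 */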

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs, each pointing to a page of
 * the same size. Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M or 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
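
/*
 * Worked example for the indirect case, with assumed sizes: for 64K
 * indirect pages and 32-byte END entries, vst_per_page is
 * 64K / 32 = 2048, so idx 5000 selects VSD #2 of the indirect table
 * (5000 / 2048) and entry 904 within that page (5000 % 2048).
 */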

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
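
/*
 * A "word" in the VST write accessor above is 4 bytes: callers either
 * update a single 32-bit word of an entry or pass XIVE_VST_WORD_ALL
 * to write back the full structure.
 */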

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                              word_number);
}

static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}
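
/*
 * The two routines above model the HW cache watch protocol: software
 * programs VC_ENDC_WATCH0_SPEC with a block/index, a read of
 * VC_ENDC_WATCH0_DATA0 loads the END entry into the DATA0-3
 * registers, and a write to DATA0 commits the (modified) entry back
 * to the VST in memory. See the VC MMIO handlers below.
 */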

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                              word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
              xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
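
/*
 * Sketch of the lookup above, assuming the P10 PIR layout used by
 * PNV10_PIR2FUSEDCORE: threads of fused cores 0-7 are enabled by bits
 * of TCTXT_EN0 and the others by TCTXT_EN1, the low 6 bits of the PIR
 * selecting the bit within the register.
 */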

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                   nvt_idx, cam_ignore,
                                                   logic_serv);
            }

            /*
             * Save the context and carry on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * Set Translation Tables
 *
 * TODO: add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                                  xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                     xive->cq_regs[CQ_TAR >> 3], ++entry);
    }

    return 0;
}
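
/*
 * Illustrative FW programming sequence (the exact skiboot sequence
 * may differ): write CQ_TAR with CQ_TAR_SELECT and
 * CQ_TAR_ENTRY_SELECT (plus CQ_TAR_AUTOINC to advance the entry
 * automatically), then write each entry value to CQ_TDR, which lands
 * in pnv_xive2_stt_set_data() above.
 */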

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured the same way, as each
 * uses the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                   vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
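
/*
 * Illustrative FW programming sequence, mirroring the VC MMIO
 * handlers below: write VC_VSD_TABLE_ADDR to select the table type
 * and block, then write the VSD to VC_VSD_TABLE_DATA, which lands
 * here. The PC sub-engine exposes the same register pair but its
 * settings are only taken into account through the VC path.
 */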

/*
 * MMIO handlers
 */


/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, w/data)
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops  },
    /* pages 384-511 reserved */
};
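
/*
 * A region's offset within the IC BAR is computed as
 * pgoff << ic_shift when the BAR is mapped. With 64K pages
 * (ic_shift = 16), the TM indirect pages thus start at
 * 256 * 64K = 16M.
 */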

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO: check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
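
/*
 * CQ_BAR_RANGE encodes the window size as a shift above 16M: a field
 * value of 0 gives a 16M range, 1 gives 32M, and so on.
 */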

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt FIFO overflow in memory backing store (not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt FIFO overflow in memory backing store (not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
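
/*
 * Worked example of the redirection above: an XSCOM access at offset
 * 0xD18 gives xscom_reg 0x1A3, which falls in the VC range and is
 * serviced at MMIO offset (0xA3 << 3) = 0x518 of the VC page.
 */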

/*
 * Notify port page. The layout is compatible between 4K and 64K pages:
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                         !!(val & XIVE_TRIGGER_PQ));
}
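
/*
 * The 64-bit trigger data carries the EAS block/index of the source
 * (XIVE_EAS_BLOCK/XIVE_EAS_INDEX), plus the XIVE_TRIGGER_PQ flag when
 * the PQ state was already checked by the sender, as decoded above.
 */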

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}
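
/*
 * Example, assuming ic_shift = 16: on chip 1, an access 5 pages into
 * the TM indirect region computes PIR (1 << 8) | 5 = 0x105 and is
 * routed to the thread context of the matching CPU.
 */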

static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir;
    XiveTCTX *tctx;
    uint64_t val = -1;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir;
    XiveTCTX *tctx;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * TIMA ops
 */

/*
 * Special TIMA offsets to handle accesses in a POWER10 way.
 *
 * Only the CAM line updates done by the hypervisor should be handled
 * specifically.
 */
#define HV_PAGE_OFFSET         (XIVE_TM_HV_PAGE << TM_SHIFT)
#define HV_PUSH_OS_CTX_OFFSET  (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2))
#define HV_PULL_OS_CTX_OFFSET  (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX)

static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead? */
    if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
        xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
        return;
    }

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead? */
    if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
        return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
    }

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
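
/*
 * The NVC (crowd) and NVPG page MMIO regions above are provisioned in
 * the chip address space but their operations are not modeled yet:
 * any access is reported as a guest error and reads return all ones.
 */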

/*
 * POWER10 default capabilities: 0x2000120076f000FC
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * The 8-bit thread id field was dropped for P10.
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000

static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the chip's Topology ID into the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}
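
/*
 * The handler above is registered with qemu_register_reset() at
 * realize time: each machine reset re-arms the default capabilities
 * and configuration, restores the 64k page shifts, and unmaps the
 * source ESB subregions until FW reprograms the controller.
 */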

/*
 *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 *  software.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
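
/*
 * Worked example (illustrative figures): each source uses a pair of
 * 64 KiB ESB pages, i.e. 1ull << XIVE_ESB_64K_2PAGE = 128 KiB of
 * MMIO, so a 1 GiB ESB window would provision 1 GiB / 128 KiB = 8192
 * sources. The actual counts follow from PNV10_XIVE2_ESB_SIZE and
 * PNV10_XIVE2_END_SIZE.
 */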

static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}

static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
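
/*
 * A minimal sketch of how board code is expected to wire the device,
 * assuming a POWER10 chip model ('chip' and 'xive' are hypothetical
 * locals; the BAR macros come from hw/ppc/pnv.h):
 *
 *     object_property_set_int(OBJECT(xive), "ic-bar",
 *                             PNV10_XIVE2_IC_BASE(chip), &error_fatal);
 *     object_property_set_int(OBJECT(xive), "tm-bar",
 *                             PNV10_XIVE2_TM_BASE(chip), &error_fatal);
 *     object_property_set_link(OBJECT(xive), "chip", OBJECT(chip),
 *                              &error_abort);
 *     qdev_realize(DEVICE(xive), NULL, &error_fatal);
 */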

static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT(fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}
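
/*
 * For illustration, the resulting node under the xscom bus looks
 * roughly like this (values derived from PNV10_XSCOM_XIVE2_BASE/SIZE):
 *
 *     xive@2010800 {
 *         compatible = "ibm,power10-xive-x";
 *         reg = <0x2010800 0x400>;
 *     };
 */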

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom  = pnv_xive2_dt_xscom;

    dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas   = pnv_xive2_get_eas;
    xrc->get_pq    = pnv_xive2_get_pq;
    xrc->set_pq    = pnv_xive2_set_pq;
    xrc->get_end   = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp   = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_config  = pnv_xive2_get_config;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify    = pnv_xive2_notify;

    xpc->match_nvt  = pnv_xive2_match_nvt;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)

static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        monitor_printf(mon, " CPPR:%02x",
                       xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            monitor_printf(mon, " CO:%04x",
                           xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    monitor_printf(mon, "\n");
}
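
/*
 * Illustrative output for one valid NVP that is HW controlled and
 * co-scheduled on thread 0x21 (the values are made up):
 *
 *   00000040 end:00/0004 IPB:80 CPPR:ff CO:0021
 */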

/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return (vsd & VSD_INDIRECT) ? 0 : vst_tsize * SBE_PER_BYTE;
}
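
/*
 * Worked example: a PQ pair takes 2 bits, hence SBE_PER_BYTE = 4
 * entries per byte. A direct ESB table with VSD_TSIZE = 4 spans
 * 4K << 4 = 64 KiB and thus provisions 65536 * 4 = 262144 sources.
 */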

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(vsd & VSD_INDIRECT)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
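
/*
 * Worked example: an indirect NVP table with 64 KiB subpages holds
 * (1 << 16) / sizeof(Xive2Nvp) = 65536 / 32 = 2048 NVP entries per
 * subpage, assuming the 8-word (32-byte) Xive2Nvp layout.
 */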

void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}
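
/*
 * pnv_xive2_pic_print_info() backs the 'info pic' monitor command on
 * the PowerNV machine: the machine handler iterates over the chips
 * and dumps each controller's sources, EAT, ENDT and NVPT in turn.
 */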