qemu/hw/intc/pnv_xive.c
/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#define XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
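
/*
 * The SBE stores the 2-bit PQ state of each interrupt source, i.e.
 * four sources per byte, hence SBE_PER_BYTE.
 */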

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
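
/*
 * For example, with a contiguous mask the value is shifted by the
 * mask's trailing zero count:
 *
 *   GETFIELD(0x00f0, 0x1234)      == 0x3
 *   SETFIELD(0x00f0, 0x1204, 0x3) == 0x1234
 */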

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_size(uint64_t vsd)
{
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    /*
     * Read the first descriptor to get the page size of the indirect
     * table.
     */
    if (VSD_INDIRECT & vsd) {
        uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE;
        uint32_t page_shift;

        vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK);
        page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            return 0;
        }

        return nr_pages * (1ull << page_shift);
    }

    return vst_tsize;
}
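
/*
 * Illustrative numbers: a VSD with VSD_TSIZE = 4 encodes a 64K table
 * (1 << (4 + 12)). Direct, that is the table size itself. Indirect,
 * the table holds 64K / 8 = 8192 VSDs and, if the first VSD also
 * reports 64K pages, covers 8192 * 64K = 512M of entries.
 */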

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
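
/*
 * Illustrative numbers: with 64K indirect pages and 32-byte ENDs
 * (sizeof(XiveEND)), vst_per_page is 2048. Looking up END 5000 loads
 * the VSD in slot 5000 / 2048 = 2 and returns the address of entry
 * 5000 % 2048 = 904 in the page it points to.
 */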

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;
    uint32_t idx_max;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_ic(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    idx_max = pnv_xive_vst_size(vsd) / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?",
                   info->name, blk, idx, idx_max);
#endif
        return 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
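
/*
 * A partial write updates a single 4-byte word of an entry: e.g.
 * word_number 1 rewrites only bytes 4..7 of the structure in RAM.
 */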

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_get_ic(blk) != xive) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}
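
/*
 * For example, IPI 0x20 on chip/block 1 is presented to the Router
 * as EAS 0x10000020, XIVE_EAS() placing the 4-bit block id in the
 * top nibble of the 32-bit interrupt number.
 */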

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}
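
/*
 * The BARM registers hold a mask from which the region size is
 * derived by two's complement: e.g. a CQ_VC_BARM value of
 * 0xfffffffff0000000 gives ~val + 1 = 0x10000000, a 256M VC region.
 */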

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE;
}

static uint32_t pnv_xive_nr_ends(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk])
        / vst_infos[VST_TSEL_EQDT].size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
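
/*
 * Illustrative layout: if EDT sets 0 and 1 are of type IPI and set 2
 * is of type EQ, a VC access in set 2 remaps into the END address
 * space at vc_offset - 2 * edt_size, the two preceding sets belonging
 * to the other type.
 */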

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END. Compute the number of ENDs
         * provisioned by FW and resize the END ESB window accordingly.
         */
        memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) *
                               (1ull << (end_xsrc->esb_shift + 1)));
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own. Compute the number of IRQs provisioned
         * by FW and resize the IPI ESB window accordingly.
         */
        memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) *
                               (1ull << xsrc->esb_shift));
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured, as each uses the Virtual
 * Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
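
/*
 * With the default 4K pages, this places the register page at offset
 * 0x0, the notify page at 0x1000, the LSI pages at 0x2000 and the
 * indirect TIMA at 0x4000, matching the subregion offsets computed
 * from ic_shift in pnv_xive_ic_reg_write() below.
 */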

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
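
/*
 * These offsets all fall in the upper half of the 4K notify page and
 * are spaced one 128-byte PowerBus cache line apart (0x800, 0x880,
 * ...), one line per operation type.
 */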

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured beforehand in the IC registers. This is
 * used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
    uint32_t nr_ends = pnv_xive_nr_ends(xive);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1);
    for (i = 0; i < nr_ends; i++) {
        if (xive_router_get_end(xrtr, blk, i, &end)) {
            break;
        }
        xive_end_pic_print_info(&end, i, mon);
    }

    monitor_printf(mon, "XIVE[%x] END Escalation %08x .. %08x\n", blk, 0,
                   nr_ends - 1);
    for (i = 0; i < nr_ends; i++) {
        if (xive_router_get_end(xrtr, blk, i, &end)) {
            break;
        }
        xive_end_eas_pic_print_info(&end, i, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
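
/*
 * Each source or END is backed by a pair of 64K ESB pages, i.e.
 * 1ull << XIVE_ESB_64K_2PAGE = 128K, so dividing the VC region size
 * by that pair size gives the maximum the controller can expose.
 */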

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    Object *obj;

    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
        return;
    }

    /* The PnvChip id identifies the XIVE interrupt controller. */
    xive->chip = PNV_CHIP(obj);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
};

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)