qemu/hw/ppc/spapr_pci.c
   1/*
   2 * QEMU sPAPR PCI host originated from Uninorth PCI host
   3 *
   4 * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
   5 * Copyright (C) 2011 David Gibson, IBM Corporation.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a copy
   8 * of this software and associated documentation files (the "Software"), to deal
   9 * in the Software without restriction, including without limitation the rights
  10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11 * copies of the Software, and to permit persons to whom the Software is
  12 * furnished to do so, subject to the following conditions:
  13 *
  14 * The above copyright notice and this permission notice shall be included in
  15 * all copies or substantial portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  23 * THE SOFTWARE.
  24 */
  25#include "qemu/osdep.h"
  26#include "qapi/error.h"
  27#include "qemu-common.h"
  28#include "cpu.h"
  29#include "hw/hw.h"
  30#include "hw/sysbus.h"
  31#include "hw/pci/pci.h"
  32#include "hw/pci/msi.h"
  33#include "hw/pci/msix.h"
  34#include "hw/pci/pci_host.h"
  35#include "hw/ppc/spapr.h"
  36#include "hw/pci-host/spapr.h"
  37#include "exec/address-spaces.h"
  38#include "exec/ram_addr.h"
  39#include <libfdt.h>
  40#include "trace.h"
  41#include "qemu/error-report.h"
  42#include "qapi/qmp/qerror.h"
  43
  44#include "hw/pci/pci_bridge.h"
  45#include "hw/pci/pci_bus.h"
  46#include "hw/ppc/spapr_drc.h"
  47#include "sysemu/device_tree.h"
  48#include "sysemu/kvm.h"
  49#include "sysemu/hostmem.h"
  50
  51#include "hw/vfio/vfio.h"
  52
  53/* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
  54#define RTAS_QUERY_FN           0
  55#define RTAS_CHANGE_FN          1
  56#define RTAS_RESET_FN           2
  57#define RTAS_CHANGE_MSI_FN      3
  58#define RTAS_CHANGE_MSIX_FN     4
  59
  60/* Interrupt types to return on RTAS_CHANGE_* */
  61#define RTAS_TYPE_MSI           1
  62#define RTAS_TYPE_MSIX          2
  63
  64#define FDT_NAME_MAX          128
  65
  66#define _FDT(exp) \
  67    do { \
  68        int ret = (exp);                                           \
  69        if (ret < 0) {                                             \
  70            return ret;                                            \
  71        }                                                          \
  72    } while (0)
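
/*
 * Illustrative use of _FDT(): each libfdt call is wrapped so that a
 * negative libfdt error code makes the enclosing function return that
 * code immediately, e.g.
 *
 *     _FDT(fdt_setprop_cell(fdt, offset, "vendor-id", 0x1af4));
 *
 * The device tree helpers further down use exactly this pattern.
 */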
  73
  74sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid)
  75{
  76    sPAPRPHBState *sphb;
  77
  78    QLIST_FOREACH(sphb, &spapr->phbs, list) {
  79        if (sphb->buid != buid) {
  80            continue;
  81        }
  82        return sphb;
  83    }
  84
  85    return NULL;
  86}
  87
  88PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
  89                              uint32_t config_addr)
  90{
  91    sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
  92    PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
  93    int bus_num = (config_addr >> 16) & 0xFF;
  94    int devfn = (config_addr >> 8) & 0xFF;
  95
  96    if (!phb) {
  97        return NULL;
  98    }
  99
 100    return pci_find_device(phb->bus, bus_num, devfn);
 101}
 102
 103static uint32_t rtas_pci_cfgaddr(uint32_t arg)
 104{
 105    /* This handles the encoding of extended config space addresses */
 106    return ((arg >> 20) & 0xf00) | (arg & 0xff);
 107}
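
/*
 * Worked example: an RTAS config address of 0x10011044 carries bus 0x01
 * in bits 16-23 and devfn 0x10 (slot 2, function 0) in bits 8-15, which
 * spapr_pci_find_dev() uses above, while bits 28-31 hold bits 8-11 of
 * the extended register number and bits 0-7 its low byte, so
 * rtas_pci_cfgaddr(0x10011044) == ((0x10011044 >> 20) & 0xf00) | 0x44
 *                              == 0x100 | 0x44 == 0x144.
 */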
 108
 109static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid,
 110                                   uint32_t addr, uint32_t size,
 111                                   target_ulong rets)
 112{
 113    PCIDevice *pci_dev;
 114    uint32_t val;
 115
 116    if ((size != 1) && (size != 2) && (size != 4)) {
 117        /* access must be 1, 2 or 4 bytes */
 118        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 119        return;
 120    }
 121
 122    pci_dev = spapr_pci_find_dev(spapr, buid, addr);
 123    addr = rtas_pci_cfgaddr(addr);
 124
 125    if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
 126        /* Access must be to a valid device, within bounds and
 127         * naturally aligned */
 128        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 129        return;
 130    }
 131
 132    val = pci_host_config_read_common(pci_dev, addr,
 133                                      pci_config_size(pci_dev), size);
 134
 135    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 136    rtas_st(rets, 1, val);
 137}
 138
 139static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 140                                     uint32_t token, uint32_t nargs,
 141                                     target_ulong args,
 142                                     uint32_t nret, target_ulong rets)
 143{
 144    uint64_t buid;
 145    uint32_t size, addr;
 146
 147    if ((nargs != 4) || (nret != 2)) {
 148        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 149        return;
 150    }
 151
 152    buid = rtas_ldq(args, 1);
 153    size = rtas_ld(args, 3);
 154    addr = rtas_ld(args, 0);
 155
 156    finish_read_pci_config(spapr, buid, addr, size, rets);
 157}
 158
 159static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 160                                 uint32_t token, uint32_t nargs,
 161                                 target_ulong args,
 162                                 uint32_t nret, target_ulong rets)
 163{
 164    uint32_t size, addr;
 165
 166    if ((nargs != 2) || (nret != 2)) {
 167        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 168        return;
 169    }
 170
 171    size = rtas_ld(args, 1);
 172    addr = rtas_ld(args, 0);
 173
 174    finish_read_pci_config(spapr, 0, addr, size, rets);
 175}
 176
 177static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid,
 178                                    uint32_t addr, uint32_t size,
 179                                    uint32_t val, target_ulong rets)
 180{
 181    PCIDevice *pci_dev;
 182
 183    if ((size != 1) && (size != 2) && (size != 4)) {
 184        /* access must be 1, 2 or 4 bytes */
 185        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 186        return;
 187    }
 188
 189    pci_dev = spapr_pci_find_dev(spapr, buid, addr);
 190    addr = rtas_pci_cfgaddr(addr);
 191
 192    if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
 193        /* Access must be to a valid device, within bounds and
 194         * naturally aligned */
 195        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 196        return;
 197    }
 198
 199    pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
 200                                 val, size);
 201
 202    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 203}
 204
 205static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 206                                      uint32_t token, uint32_t nargs,
 207                                      target_ulong args,
 208                                      uint32_t nret, target_ulong rets)
 209{
 210    uint64_t buid;
 211    uint32_t val, size, addr;
 212
 213    if ((nargs != 5) || (nret != 1)) {
 214        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 215        return;
 216    }
 217
 218    buid = rtas_ldq(args, 1);
 219    val = rtas_ld(args, 4);
 220    size = rtas_ld(args, 3);
 221    addr = rtas_ld(args, 0);
 222
 223    finish_write_pci_config(spapr, buid, addr, size, val, rets);
 224}
 225
 226static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 227                                  uint32_t token, uint32_t nargs,
 228                                  target_ulong args,
 229                                  uint32_t nret, target_ulong rets)
 230{
 231    uint32_t val, size, addr;
 232
 233    if ((nargs != 3) || (nret != 1)) {
 234        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 235        return;
 236    }
 237
 238
 239    val = rtas_ld(args, 2);
 240    size = rtas_ld(args, 1);
 241    addr = rtas_ld(args, 0);
 242
 243    finish_write_pci_config(spapr, 0, addr, size, val, rets);
 244}
 245
 246/*
 247 * Set MSI/MSIX message data.
 248 * This is required for msi_notify()/msix_notify() which
 249 * will write to these addresses via spapr_msi_write().
 250 *
 251 * If @addr is 0, all entries will have .data == first_irq, i.e.
 252 * the table will be reset.
 253 */
 254static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
 255                             unsigned first_irq, unsigned req_num)
 256{
 257    unsigned i;
 258    MSIMessage msg = { .address = addr, .data = first_irq };
 259
 260    if (!msix) {
 261        msi_set_message(pdev, msg);
 262        trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
 263        return;
 264    }
 265
 266    for (i = 0; i < req_num; ++i) {
 267        msix_set_message(pdev, i, msg);
 268        trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
 269        if (addr) {
 270            ++msg.data;
 271        }
 272    }
 273}
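
/*
 * Worked example: spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, true,
 * 2000, 3) programs three MSI-X vectors, all with
 * .address == SPAPR_PCI_MSI_WINDOW and .data == 2000, 2001 and 2002
 * respectively, so firing a vector becomes a write of its IRQ number
 * into the MSI window handled by spapr_msi_write() below.
 */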
 274
 275static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 276                                uint32_t token, uint32_t nargs,
 277                                target_ulong args, uint32_t nret,
 278                                target_ulong rets)
 279{
 280    uint32_t config_addr = rtas_ld(args, 0);
 281    uint64_t buid = rtas_ldq(args, 1);
 282    unsigned int func = rtas_ld(args, 3);
 283    unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
 284    unsigned int seq_num = rtas_ld(args, 5);
 285    unsigned int ret_intr_type;
 286    unsigned int irq, max_irqs = 0;
 287    sPAPRPHBState *phb = NULL;
 288    PCIDevice *pdev = NULL;
 289    spapr_pci_msi *msi;
 290    int *config_addr_key;
 291    Error *err = NULL;
 292
 293    switch (func) {
 294    case RTAS_CHANGE_MSI_FN:
 295    case RTAS_CHANGE_FN:
 296        ret_intr_type = RTAS_TYPE_MSI;
 297        break;
 298    case RTAS_CHANGE_MSIX_FN:
 299        ret_intr_type = RTAS_TYPE_MSIX;
 300        break;
 301    default:
 302        error_report("rtas_ibm_change_msi(%u) is not implemented", func);
 303        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 304        return;
 305    }
 306
 307    /* Find sPAPRPHBState */
 308    phb = spapr_pci_find_phb(spapr, buid);
 309    if (phb) {
 310        pdev = spapr_pci_find_dev(spapr, buid, config_addr);
 311    }
 312    if (!phb || !pdev) {
 313        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 314        return;
 315    }
 316
 317    msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
 318
 319    /* Releasing MSIs */
 320    if (!req_num) {
 321        if (!msi) {
 322            trace_spapr_pci_msi("Releasing wrong config", config_addr);
 323            rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 324            return;
 325        }
 326
 327        xics_spapr_free(spapr->xics, msi->first_irq, msi->num);
 328        if (msi_present(pdev)) {
 329            spapr_msi_setmsg(pdev, 0, false, 0, 0);
 330        }
 331        if (msix_present(pdev)) {
 332            spapr_msi_setmsg(pdev, 0, true, 0, 0);
 333        }
 334        g_hash_table_remove(phb->msi, &config_addr);
 335
 336        trace_spapr_pci_msi("Released MSIs", config_addr);
 337        rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 338        rtas_st(rets, 1, 0);
 339        return;
 340    }
 341
 342    /* Enabling MSI */
 343
 344    /* Check if the device supports as many IRQs as requested */
 345    if (ret_intr_type == RTAS_TYPE_MSI) {
 346        max_irqs = msi_nr_vectors_allocated(pdev);
 347    } else if (ret_intr_type == RTAS_TYPE_MSIX) {
 348        max_irqs = pdev->msix_entries_nr;
 349    }
 350    if (!max_irqs) {
 351        error_report("Requested interrupt type %d is not enabled for device %x",
 352                     ret_intr_type, config_addr);
 353        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 354        return;
 355    }
 356    /* Correct the number if the guest asked for too many */
 357    if (req_num > max_irqs) {
 358        trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
 359        req_num = max_irqs;
 360        irq = 0; /* to avoid misleading trace */
 361        goto out;
 362    }
 363
 364    /* Allocate MSIs */
 365    irq = xics_spapr_alloc_block(spapr->xics, 0, req_num, false,
 366                           ret_intr_type == RTAS_TYPE_MSI, &err);
 367    if (err) {
 368        error_reportf_err(err, "Can't allocate MSIs for device %x: ",
 369                          config_addr);
 370        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 371        return;
 372    }
 373
 374    /* Release previous MSIs */
 375    if (msi) {
 376        xics_spapr_free(spapr->xics, msi->first_irq, msi->num);
 377        g_hash_table_remove(phb->msi, &config_addr);
 378    }
 379
 380    /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
 381    spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
 382                     irq, req_num);
 383
 384    /* Add MSI device to cache */
 385    msi = g_new(spapr_pci_msi, 1);
 386    msi->first_irq = irq;
 387    msi->num = req_num;
 388    config_addr_key = g_new(int, 1);
 389    *config_addr_key = config_addr;
 390    g_hash_table_insert(phb->msi, config_addr_key, msi);
 391
 392out:
 393    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 394    rtas_st(rets, 1, req_num);
 395    rtas_st(rets, 2, ++seq_num);
 396    if (nret > 3) {
 397        rtas_st(rets, 3, ret_intr_type);
 398    }
 399
 400    trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
 401}
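
/*
 * For reference, the ibm,change-msi arguments read above are:
 *   args[0]     config_addr
 *   args[1..2]  64-bit BUID
 *   args[3]     function (RTAS_CHANGE_FN, RTAS_CHANGE_MSI_FN, ...)
 *   args[4]     number of interrupts requested (0 == release)
 *   args[5]     sequence number
 * and the values returned are:
 *   rets[0]     status
 *   rets[1]     number of interrupts granted
 *   rets[2]     next sequence number
 *   rets[3]     interrupt type (only when nret > 3)
 */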
 402
 403static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
 404                                                   sPAPRMachineState *spapr,
 405                                                   uint32_t token,
 406                                                   uint32_t nargs,
 407                                                   target_ulong args,
 408                                                   uint32_t nret,
 409                                                   target_ulong rets)
 410{
 411    uint32_t config_addr = rtas_ld(args, 0);
 412    uint64_t buid = rtas_ldq(args, 1);
 413    unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
 414    sPAPRPHBState *phb = NULL;
 415    PCIDevice *pdev = NULL;
 416    spapr_pci_msi *msi;
 417
 418    /* Find sPAPRPHBState */
 419    phb = spapr_pci_find_phb(spapr, buid);
 420    if (phb) {
 421        pdev = spapr_pci_find_dev(spapr, buid, config_addr);
 422    }
 423    if (!phb || !pdev) {
 424        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 425        return;
 426    }
 427
 428    /* Find device descriptor and start IRQ */
 429    msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
 430    if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
 431        trace_spapr_pci_msi("Failed to return vector", config_addr);
 432        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
 433        return;
 434    }
 435    intr_src_num = msi->first_irq + ioa_intr_num;
 436    trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
 437                                                           intr_src_num);
 438
 439    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 440    rtas_st(rets, 1, intr_src_num);
 441    rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
 442}
 443
 444static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
 445                                    sPAPRMachineState *spapr,
 446                                    uint32_t token, uint32_t nargs,
 447                                    target_ulong args, uint32_t nret,
 448                                    target_ulong rets)
 449{
 450    sPAPRPHBState *sphb;
 451    uint32_t addr, option;
 452    uint64_t buid;
 453    int ret;
 454
 455    if ((nargs != 4) || (nret != 1)) {
 456        goto param_error_exit;
 457    }
 458
 459    buid = rtas_ldq(args, 1);
 460    addr = rtas_ld(args, 0);
 461    option = rtas_ld(args, 3);
 462
 463    sphb = spapr_pci_find_phb(spapr, buid);
 464    if (!sphb) {
 465        goto param_error_exit;
 466    }
 467
 468    if (!spapr_phb_eeh_available(sphb)) {
 469        goto param_error_exit;
 470    }
 471
 472    ret = spapr_phb_vfio_eeh_set_option(sphb, addr, option);
 473    rtas_st(rets, 0, ret);
 474    return;
 475
 476param_error_exit:
 477    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 478}
 479
 480static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
 481                                           sPAPRMachineState *spapr,
 482                                           uint32_t token, uint32_t nargs,
 483                                           target_ulong args, uint32_t nret,
 484                                           target_ulong rets)
 485{
 486    sPAPRPHBState *sphb;
 487    PCIDevice *pdev;
 488    uint32_t addr, option;
 489    uint64_t buid;
 490
 491    if ((nargs != 4) || (nret != 2)) {
 492        goto param_error_exit;
 493    }
 494
 495    buid = rtas_ldq(args, 1);
 496    sphb = spapr_pci_find_phb(spapr, buid);
 497    if (!sphb) {
 498        goto param_error_exit;
 499    }
 500
 501    if (!spapr_phb_eeh_available(sphb)) {
 502        goto param_error_exit;
 503    }
 504
 505    /*
 506     * We always have a PE address of the form "00BB0001", where "BB"
 507     * represents the bus number of the PE's primary bus.
 508     */
 509    option = rtas_ld(args, 3);
 510    switch (option) {
 511    case RTAS_GET_PE_ADDR:
 512        addr = rtas_ld(args, 0);
 513        pdev = spapr_pci_find_dev(spapr, buid, addr);
 514        if (!pdev) {
 515            goto param_error_exit;
 516        }
 517
 518        rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
 519        break;
 520    case RTAS_GET_PE_MODE:
 521        rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
 522        break;
 523    default:
 524        goto param_error_exit;
 525    }
 526
 527    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 528    return;
 529
 530param_error_exit:
 531    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 532}
 533
 534static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
 535                                            sPAPRMachineState *spapr,
 536                                            uint32_t token, uint32_t nargs,
 537                                            target_ulong args, uint32_t nret,
 538                                            target_ulong rets)
 539{
 540    sPAPRPHBState *sphb;
 541    uint64_t buid;
 542    int state, ret;
 543
 544    if ((nargs != 3) || (nret != 4 && nret != 5)) {
 545        goto param_error_exit;
 546    }
 547
 548    buid = rtas_ldq(args, 1);
 549    sphb = spapr_pci_find_phb(spapr, buid);
 550    if (!sphb) {
 551        goto param_error_exit;
 552    }
 553
 554    if (!spapr_phb_eeh_available(sphb)) {
 555        goto param_error_exit;
 556    }
 557
 558    ret = spapr_phb_vfio_eeh_get_state(sphb, &state);
 559    rtas_st(rets, 0, ret);
 560    if (ret != RTAS_OUT_SUCCESS) {
 561        return;
 562    }
 563
 564    rtas_st(rets, 1, state);
 565    rtas_st(rets, 2, RTAS_EEH_SUPPORT);
 566    rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
 567    if (nret >= 5) {
 568        rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
 569    }
 570    return;
 571
 572param_error_exit:
 573    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 574}
 575
 576static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
 577                                    sPAPRMachineState *spapr,
 578                                    uint32_t token, uint32_t nargs,
 579                                    target_ulong args, uint32_t nret,
 580                                    target_ulong rets)
 581{
 582    sPAPRPHBState *sphb;
 583    uint32_t option;
 584    uint64_t buid;
 585    int ret;
 586
 587    if ((nargs != 4) || (nret != 1)) {
 588        goto param_error_exit;
 589    }
 590
 591    buid = rtas_ldq(args, 1);
 592    option = rtas_ld(args, 3);
 593    sphb = spapr_pci_find_phb(spapr, buid);
 594    if (!sphb) {
 595        goto param_error_exit;
 596    }
 597
 598    if (!spapr_phb_eeh_available(sphb)) {
 599        goto param_error_exit;
 600    }
 601
 602    ret = spapr_phb_vfio_eeh_reset(sphb, option);
 603    rtas_st(rets, 0, ret);
 604    return;
 605
 606param_error_exit:
 607    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 608}
 609
 610static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
 611                                  sPAPRMachineState *spapr,
 612                                  uint32_t token, uint32_t nargs,
 613                                  target_ulong args, uint32_t nret,
 614                                  target_ulong rets)
 615{
 616    sPAPRPHBState *sphb;
 617    uint64_t buid;
 618    int ret;
 619
 620    if ((nargs != 3) || (nret != 1)) {
 621        goto param_error_exit;
 622    }
 623
 624    buid = rtas_ldq(args, 1);
 625    sphb = spapr_pci_find_phb(spapr, buid);
 626    if (!sphb) {
 627        goto param_error_exit;
 628    }
 629
 630    if (!spapr_phb_eeh_available(sphb)) {
 631        goto param_error_exit;
 632    }
 633
 634    ret = spapr_phb_vfio_eeh_configure(sphb);
 635    rtas_st(rets, 0, ret);
 636    return;
 637
 638param_error_exit:
 639    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 640}
 641
 642/* Stub: error log retrieval to be supported later */
 643static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
 644                                       sPAPRMachineState *spapr,
 645                                       uint32_t token, uint32_t nargs,
 646                                       target_ulong args, uint32_t nret,
 647                                       target_ulong rets)
 648{
 649    sPAPRPHBState *sphb;
 650    int option;
 651    uint64_t buid;
 652
 653    if ((nargs != 8) || (nret != 1)) {
 654        goto param_error_exit;
 655    }
 656
 657    buid = rtas_ldq(args, 1);
 658    sphb = spapr_pci_find_phb(spapr, buid);
 659    if (!sphb) {
 660        goto param_error_exit;
 661    }
 662
 663    if (!spapr_phb_eeh_available(sphb)) {
 664        goto param_error_exit;
 665    }
 666
 667    option = rtas_ld(args, 7);
 668    switch (option) {
 669    case RTAS_SLOT_TEMP_ERR_LOG:
 670    case RTAS_SLOT_PERM_ERR_LOG:
 671        break;
 672    default:
 673        goto param_error_exit;
 674    }
 675
 676    /* We don't have an error log yet */
 677    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
 678    return;
 679
 680param_error_exit:
 681    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 682}
 683
 684static int pci_spapr_swizzle(int slot, int pin)
 685{
 686    return (slot + pin) % PCI_NUM_PINS;
 687}
 688
 689static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
 690{
 691    /*
 692     * Here we need to convert pci_dev + irq_num to some unique value
 693     * which is less than number of IRQs on the specific bus (4).  We
 694     * use standard PCI swizzling, that is (slot number + pin number)
 695     * % 4.
 696     */
 697    return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
 698}
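
/*
 * For example, a device in slot 3 asserting INTB (irq_num 1) maps to
 * LSI (3 + 1) % 4 == 0, while INTB on slot 4 maps to LSI 1.
 */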
 699
 700static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
 701{
 702    /*
 703     * Here we use the number returned by pci_spapr_map_irq to find a
 704     * corresponding qemu_irq.
 705     */
 706    sPAPRPHBState *phb = opaque;
 707
 708    trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
 709    qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
 710}
 711
 712static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
 713{
 714    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
 715    PCIINTxRoute route;
 716
 717    route.mode = PCI_INTX_ENABLED;
 718    route.irq = sphb->lsi_table[pin].irq;
 719
 720    return route;
 721}
 722
 723/*
 724 * MSI/MSIX memory region implementation.
 725 * The handler handles both MSI and MSI-X.
 726 * In either case spapr_msi_setmsg() programs the vectors with
 727 * .address == SPAPR_PCI_MSI_WINDOW and .data == the XICS IRQ number,
 728 * so the IRQ to pulse is taken from the written data.
 729 */
 730static void spapr_msi_write(void *opaque, hwaddr addr,
 731                            uint64_t data, unsigned size)
 732{
 733    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
 734    uint32_t irq = data;
 735
 736    trace_spapr_pci_msi_write(addr, data, irq);
 737
 738    qemu_irq_pulse(xics_get_qirq(spapr->xics, irq));
 739}
 740
 741static const MemoryRegionOps spapr_msi_ops = {
 742    /* There is no .read as the read result is undefined by PCI spec */
 743    .read = NULL,
 744    .write = spapr_msi_write,
 745    .endianness = DEVICE_LITTLE_ENDIAN
 746};
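
/*
 * E.g. when a device fires the third vector from the spapr_msi_setmsg()
 * example above, msix_notify() writes .data == 2002 (0x7d2) to
 * SPAPR_PCI_MSI_WINDOW; spapr_msi_write() then pulses XICS IRQ 2002.
 */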
 747
 748/*
 749 * PHB PCI device
 750 */
 751static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
 752{
 753    sPAPRPHBState *phb = opaque;
 754
 755    return &phb->iommu_as;
 756}
 757
 758static char *spapr_phb_vfio_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev)
 759{
 760    char *path = NULL, *buf = NULL, *host = NULL;
 761
 762    /* Get the PCI VFIO host id */
 763    host = object_property_get_str(OBJECT(pdev), "host", NULL);
 764    if (!host) {
 765        goto err_out;
 766    }
 767
 768    /* Construct the path of the file that will give us the DT location */
 769    path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host);
 770    g_free(host);
 771    if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) {
 772        goto err_out;
 773    }
 774    g_free(path);
 775
 776    /* Construct and read from host device tree the loc-code */
 777    path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", buf);
 778    g_free(buf);
 779    if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) {
 780        goto err_out;
 781    }
 782    return buf;
 783
 784err_out:
 785    g_free(path);
 786    return NULL;
 787}
 788
 789static char *spapr_phb_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev)
 790{
 791    char *buf;
 792    const char *devtype = "qemu";
 793    uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
 794
 795    if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
 796        buf = spapr_phb_vfio_get_loc_code(sphb, pdev);
 797        if (buf) {
 798            return buf;
 799        }
 800        devtype = "vfio";
 801    }
 802    /*
 803     * For emulated devices and VFIO-failure case, make up
 804     * the loc-code.
 805     */
 806    buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x",
 807                          devtype, pdev->name, sphb->index, busnr,
 808                          PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 809    return buf;
 810}
 811
 812/* Macros to operate with address in OF binding to PCI */
 813#define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
 814#define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
 815#define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
 816#define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
 817#define b_ss(x)         b_x((x), 24, 2) /* the space code */
 818#define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
 819#define b_ddddd(x)      b_x((x), 11, 5) /* device number */
 820#define b_fff(x)        b_x((x), 8, 3)  /* function number */
 821#define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */
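
/*
 * Worked example: a non-prefetchable 32-bit MEM BAR at config offset
 * 0x10 of slot 5, function 0 on bus 0 is described by
 *   phys.hi = b_ss(2) | b_ddddd(5) | b_rrrrrrrr(0x10) == 0x02002810
 * (space code 10b, device 5, register 0x10), with the n, p, t, bus and
 * function fields all zero.
 */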
 822
 823/* for 'reg'/'assigned-addresses' OF properties */
 824#define RESOURCE_CELLS_SIZE 2
 825#define RESOURCE_CELLS_ADDRESS 3
 826
 827typedef struct ResourceFields {
 828    uint32_t phys_hi;
 829    uint32_t phys_mid;
 830    uint32_t phys_lo;
 831    uint32_t size_hi;
 832    uint32_t size_lo;
 833} QEMU_PACKED ResourceFields;
 834
 835typedef struct ResourceProps {
 836    ResourceFields reg[8];
 837    ResourceFields assigned[7];
 838    uint32_t reg_len;
 839    uint32_t assigned_len;
 840} ResourceProps;
 841
 842/* fill in the 'reg'/'assigned-addresses' OF properties for
 843 * a PCI device. 'reg' describes resource requirements for a
 844 * device's IO/MEM regions, 'assigned-addresses' describes the
 845 * actual resource assignments.
 846 *
 847 * the properties are arrays of ('phys-addr', 'size') pairs describing
 848 * the addressable regions of the PCI device, where 'phys-addr' is a
 849 * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
 850 * (phys.hi, phys.mid, phys.lo), and 'size' is a
 851 * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
 852 *
 853 * phys.hi = 0xYYXXXXZZ, where:
 854 *   0xYY = npt000ss
 855 *          |||   |
 856 *          |||   +-- space code
 857 *          |||               |
 858 *          |||               +  00 if configuration space
 859 *          |||               +  01 if IO region,
 860 *          |||               +  10 if 32-bit MEM region
 861 *          |||               +  11 if 64-bit MEM region
 862 *          |||
 863 *          ||+------ for non-relocatable IO: 1 if aliased
 864 *          ||        for relocatable IO: 1 if below 64KB
 865 *          ||        for MEM: 1 if below 1MB
 866 *          |+------- 1 if region is prefetchable
 867 *          +-------- 1 if region is non-relocatable
 868 *   0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
 869 *            bits respectively
 870 *   0xZZ = rrrrrrrr, the register number of the BAR corresponding
 871 *          to the region
 872 *
 873 * phys.mid and phys.lo correspond respectively to the hi/lo portions
 874 * of the actual address of the region.
 875 *
 876 * how the phys-addr/size values are used differs slightly between
 877 * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
 878 * an additional description for the config space region of the
 879 * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
 880 * to describe the region as relocatable, with an address-mapping
 881 * that corresponds directly to the PHB's address space for the
 882 * resource. 'assigned-addresses' always has n=1 set with an absolute
 883 * address assigned for the resource. in general, 'assigned-addresses'
 884 * won't be populated, since addresses for PCI devices are generally
 885 * unmapped initially and left to the guest to assign.
 886 *
 887 * note also that addresses defined in these properties are, at least
 888 * for PAPR guests, relative to the PHB's IO/MEM windows, and
 889 * correspond directly to the addresses in the BARs.
 890 *
 891 * in accordance with PCI Bus Binding to Open Firmware,
 892 * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
 893 * Appendix C.
 894 */
 895static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
 896{
 897    int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
 898    uint32_t dev_id = (b_bbbbbbbb(bus_num) |
 899                       b_ddddd(PCI_SLOT(d->devfn)) |
 900                       b_fff(PCI_FUNC(d->devfn)));
 901    ResourceFields *reg, *assigned;
 902    int i, reg_idx = 0, assigned_idx = 0;
 903
 904    /* config space region */
 905    reg = &rp->reg[reg_idx++];
 906    reg->phys_hi = cpu_to_be32(dev_id);
 907    reg->phys_mid = 0;
 908    reg->phys_lo = 0;
 909    reg->size_hi = 0;
 910    reg->size_lo = 0;
 911
 912    for (i = 0; i < PCI_NUM_REGIONS; i++) {
 913        if (!d->io_regions[i].size) {
 914            continue;
 915        }
 916
 917        reg = &rp->reg[reg_idx++];
 918
 919        reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
 920        if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
 921            reg->phys_hi |= cpu_to_be32(b_ss(1));
 922        } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
 923            reg->phys_hi |= cpu_to_be32(b_ss(3));
 924        } else {
 925            reg->phys_hi |= cpu_to_be32(b_ss(2));
 926        }
 927        reg->phys_mid = 0;
 928        reg->phys_lo = 0;
 929        reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
 930        reg->size_lo = cpu_to_be32(d->io_regions[i].size);
 931
 932        if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
 933            continue;
 934        }
 935
 936        assigned = &rp->assigned[assigned_idx++];
 937        assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
 938        assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
 939        assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
 940        assigned->size_hi = reg->size_hi;
 941        assigned->size_lo = reg->size_lo;
 942    }
 943
 944    rp->reg_len = reg_idx * sizeof(ResourceFields);
 945    rp->assigned_len = assigned_idx * sizeof(ResourceFields);
 946}
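
/*
 * Continuing the example above: a device with only that one 32-bit MEM
 * BAR gets a "reg" property of two ResourceFields entries, the config
 * space entry (phys.hi == dev_id, everything else 0) and the BAR entry
 * (phys.hi == dev_id | b_ss(2) | b_rrrrrrrr(0x10), size.lo == BAR size),
 * while "assigned-addresses" gains an entry only once the BAR has
 * actually been mapped (addr != PCI_BAR_UNMAPPED).
 */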
 947
 948static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
 949                                            PCIDevice *pdev);
 950
 951static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
 952                                       sPAPRPHBState *sphb)
 953{
 954    ResourceProps rp;
 955    bool is_bridge = false;
 956    int pci_status, err;
 957    char *buf = NULL;
 958    uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev);
 959    uint32_t max_msi, max_msix;
 960
 961    if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
 962        PCI_HEADER_TYPE_BRIDGE) {
 963        is_bridge = true;
 964    }
 965
 966    /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
 967    _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
 968                          pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
 969    _FDT(fdt_setprop_cell(fdt, offset, "device-id",
 970                          pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
 971    _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
 972                          pci_default_read_config(dev, PCI_REVISION_ID, 1)));
 973    _FDT(fdt_setprop_cell(fdt, offset, "class-code",
 974                          pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
 975    if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
 976        _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
 977                 pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
 978    }
 979
 980    if (!is_bridge) {
 981        _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
 982            pci_default_read_config(dev, PCI_MIN_GNT, 1)));
 983        _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
 984            pci_default_read_config(dev, PCI_MAX_LAT, 1)));
 985    }
 986
 987    if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
 988        _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
 989                 pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
 990    }
 991
 992    if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
 993        _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
 994                 pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
 995    }
 996
 997    _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
 998        pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
 999
1000    /* the following fdt cells are masked off the pci status register */
1001    pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
1002    _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
1003                          PCI_STATUS_DEVSEL_MASK & pci_status));
1004
1005    if (pci_status & PCI_STATUS_FAST_BACK) {
1006        _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
1007    }
1008    if (pci_status & PCI_STATUS_66MHZ) {
1009        _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
1010    }
1011    if (pci_status & PCI_STATUS_UDF) {
1012        _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
1013    }
1014
1015    /* NOTE: this is normally generated by firmware via path/unit name,
1016     * but in our case we must set it manually since it does not get
1017     * processed by OF beforehand
1018     */
1019    _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
1020    buf = spapr_phb_get_loc_code(sphb, dev);
1021    if (!buf) {
1022        error_report("Failed setting the ibm,loc-code");
1023        return -1;
1024    }
1025
1026    err = fdt_setprop_string(fdt, offset, "ibm,loc-code", buf);
1027    g_free(buf);
1028    if (err < 0) {
1029        return err;
1030    }
1031
1032    if (drc_index) {
1033        _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
1034    }
1035
1036    _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
1037                          RESOURCE_CELLS_ADDRESS));
1038    _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
1039                          RESOURCE_CELLS_SIZE));
1040
1041    max_msi = msi_nr_vectors_allocated(dev);
1042    if (max_msi) {
1043        _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
1044    }
1045    max_msix = dev->msix_entries_nr;
1046    if (max_msix) {
1047        _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
1048    }
1049
1050    populate_resource_props(dev, &rp);
1051    _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
1052    _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
1053                     (uint8_t *)rp.assigned, rp.assigned_len));
1054
1055    return 0;
1056}
1057
1058/* create OF node for pci device and required OF DT properties */
1059static int spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
1060                                     void *fdt, int node_offset)
1061{
1062    int offset, ret;
1063    int slot = PCI_SLOT(dev->devfn);
1064    int func = PCI_FUNC(dev->devfn);
1065    char nodename[FDT_NAME_MAX];
1066
1067    if (func != 0) {
1068        snprintf(nodename, FDT_NAME_MAX, "pci@%x,%x", slot, func);
1069    } else {
1070        snprintf(nodename, FDT_NAME_MAX, "pci@%x", slot);
1071    }
1072    offset = fdt_add_subnode(fdt, node_offset, nodename);
1073    ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb);
1074
1075    g_assert(!ret);
1076    if (ret) {
1077        return 0;
1078    }
1079    return offset;
1080}
1081
1082static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
1083                                     sPAPRPHBState *phb,
1084                                     PCIDevice *pdev,
1085                                     Error **errp)
1086{
1087    sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1088    DeviceState *dev = DEVICE(pdev);
1089    void *fdt = NULL;
1090    int fdt_start_offset = 0, fdt_size;
1091
1092    fdt = create_device_tree(&fdt_size);
1093    fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0);
1094    if (!fdt_start_offset) {
1095        error_setg(errp, "Failed to create pci child device tree node");
1096        goto out;
1097    }
1098
1099    drck->attach(drc, DEVICE(pdev),
1100                 fdt, fdt_start_offset, !dev->hotplugged, errp);
1101out:
1102    if (*errp) {
1103        g_free(fdt);
1104    }
1105}
1106
1107static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
1108{
1109    /* some guest versions do not wait for completion of a device
1110     * cleanup (generally done asynchronously by the kernel) before
1111     * signaling to QEMU that the device is safe, but instead sleep
1112     * for some 'safe' period of time. unfortunately on a busy host
1113     * this sleep isn't guaranteed to be long enough, resulting in
1114     * bad things like IRQ lines being left asserted during final
1115     * device removal. to deal with this we call reset just prior
1116     * to finalizing the device, which will put the device back into
1117     * an 'idle' state, as the device cleanup code expects.
1118     */
1119    pci_device_reset(PCI_DEVICE(dev));
1120    object_unparent(OBJECT(dev));
1121}
1122
1123static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
1124                                        sPAPRPHBState *phb,
1125                                        PCIDevice *pdev,
1126                                        Error **errp)
1127{
1128    sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1129
1130    drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
1131}
1132
1133static sPAPRDRConnector *spapr_phb_get_pci_func_drc(sPAPRPHBState *phb,
1134                                                    uint32_t busnr,
1135                                                    int32_t devfn)
1136{
1137    return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
1138                                    (phb->index << 16) |
1139                                    (busnr << 8) |
1140                                    devfn);
1141}
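
/*
 * E.g. with phb->index == 1, the DRC for bus 0, slot 2, function 0
 * (devfn 0x10) is looked up by id (1 << 16) | (0 << 8) | 0x10 == 0x10010,
 * matching the ids pre-created for bus 0 in spapr_phb_realize().
 */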
1142
1143static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
1144                                               PCIDevice *pdev)
1145{
1146    uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
1147    return spapr_phb_get_pci_func_drc(phb, busnr, pdev->devfn);
1148}
1149
1150static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
1151                                            PCIDevice *pdev)
1152{
1153    sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1154    sPAPRDRConnectorClass *drck;
1155
1156    if (!drc) {
1157        return 0;
1158    }
1159
1160    drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1161    return drck->get_index(drc);
1162}
1163
1164static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
1165                                     DeviceState *plugged_dev, Error **errp)
1166{
1167    sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1168    PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1169    sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1170    Error *local_err = NULL;
1171    PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1172    uint32_t slotnr = PCI_SLOT(pdev->devfn);
1173
1174    /* if DR is disabled we don't need to do anything in the case of
1175     * hotplug or coldplug callbacks
1176     */
1177    if (!phb->dr_enabled) {
1178        /* if this is a hotplug operation initiated by the user
1179         * we need to let them know it's not enabled
1180         */
1181        if (plugged_dev->hotplugged) {
1182            error_setg(errp, QERR_BUS_NO_HOTPLUG,
1183                       object_get_typename(OBJECT(phb)));
1184        }
1185        return;
1186    }
1187
1188    g_assert(drc);
1189
1190    /* Following the QEMU convention used for PCIe multifunction
1191     * hotplug, we do not allow functions to be hotplugged to a
1192     * slot that already has function 0 present
1193     */
1194    if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] &&
1195        PCI_FUNC(pdev->devfn) != 0) {
1196        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
1197                   " additional functions can no longer be exposed to the guest.",
1198                   slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name);
1199        return;
1200    }
1201
1202    spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
1203    if (local_err) {
1204        error_propagate(errp, local_err);
1205        return;
1206    }
1207
1208    /* If this is function 0, signal hotplug for all the device functions.
1209     * Otherwise defer sending the hotplug event.
1210     */
1211    if (plugged_dev->hotplugged && PCI_FUNC(pdev->devfn) == 0) {
1212        int i;
1213
1214        for (i = 0; i < 8; i++) {
1215            sPAPRDRConnector *func_drc;
1216            sPAPRDRConnectorClass *func_drck;
1217            sPAPRDREntitySense state;
1218
1219            func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1220                                                  PCI_DEVFN(slotnr, i));
1221            func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1222            func_drck->entity_sense(func_drc, &state);
1223
1224            if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1225                spapr_hotplug_req_add_by_index(func_drc);
1226            }
1227        }
1228    }
1229}
1230
1231static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
1232                                       DeviceState *plugged_dev, Error **errp)
1233{
1234    sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
1235    PCIDevice *pdev = PCI_DEVICE(plugged_dev);
1236    sPAPRDRConnectorClass *drck;
1237    sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
1238    Error *local_err = NULL;
1239
1240    if (!phb->dr_enabled) {
1241        error_setg(errp, QERR_BUS_NO_HOTPLUG,
1242                   object_get_typename(OBJECT(phb)));
1243        return;
1244    }
1245
1246    g_assert(drc);
1247
1248    drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
1249    if (!drck->release_pending(drc)) {
1250        PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
1251        uint32_t slotnr = PCI_SLOT(pdev->devfn);
1252        sPAPRDRConnector *func_drc;
1253        sPAPRDRConnectorClass *func_drck;
1254        sPAPRDREntitySense state;
1255        int i;
1256
1257        /* ensure any other present functions are pending unplug */
1258        if (PCI_FUNC(pdev->devfn) == 0) {
1259            for (i = 1; i < 8; i++) {
1260                func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1261                                                      PCI_DEVFN(slotnr, i));
1262                func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1263                func_drck->entity_sense(func_drc, &state);
1264                if (state == SPAPR_DR_ENTITY_SENSE_PRESENT
1265                    && !func_drck->release_pending(func_drc)) {
1266                    error_setg(errp,
1267                               "PCI: slot %d, function %d still present. "
1268                               "Must unplug all non-0 functions first.",
1269                               slotnr, i);
1270                    return;
1271                }
1272            }
1273        }
1274
1275        spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
1276        if (local_err) {
1277            error_propagate(errp, local_err);
1278            return;
1279        }
1280
1281        /* if this isn't func 0, defer unplug event. otherwise signal removal
1282         * for all present functions
1283         */
1284        if (PCI_FUNC(pdev->devfn) == 0) {
1285            for (i = 7; i >= 0; i--) {
1286                func_drc = spapr_phb_get_pci_func_drc(phb, pci_bus_num(bus),
1287                                                      PCI_DEVFN(slotnr, i));
1288                func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc);
1289                func_drck->entity_sense(func_drc, &state);
1290                if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) {
1291                    spapr_hotplug_req_remove_by_index(func_drc);
1292                }
1293            }
1294        }
1295    }
1296}
1297
1298static void spapr_phb_realize(DeviceState *dev, Error **errp)
1299{
1300    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1301    SysBusDevice *s = SYS_BUS_DEVICE(dev);
1302    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
1303    PCIHostState *phb = PCI_HOST_BRIDGE(s);
1304    char *namebuf;
1305    int i;
1306    PCIBus *bus;
1307    uint64_t msi_window_size = 4096;
1308    sPAPRTCETable *tcet;
1309    const unsigned windows_supported =
1310        sphb->ddw_enabled ? SPAPR_PCI_DMA_MAX_WINDOWS : 1;
1311
1312    if (sphb->index != (uint32_t)-1) {
1313        hwaddr windows_base;
1314
1315        if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn[0] != (uint32_t)-1)
1316            || (sphb->dma_liobn[1] != (uint32_t)-1 && windows_supported == 2)
1317            || (sphb->mem_win_addr != (hwaddr)-1)
1318            || (sphb->io_win_addr != (hwaddr)-1)) {
1319            error_setg(errp, "Either \"index\" or other parameters must"
1320                       " be specified for PAPR PHB, not both");
1321            return;
1322        }
1323
1324        if (sphb->index > SPAPR_PCI_MAX_INDEX) {
1325            error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
1326                       SPAPR_PCI_MAX_INDEX);
1327            return;
1328        }
1329
1330        sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
1331        for (i = 0; i < windows_supported; ++i) {
1332            sphb->dma_liobn[i] = SPAPR_PCI_LIOBN(sphb->index, i);
1333        }
1334
1335        windows_base = SPAPR_PCI_WINDOW_BASE
1336            + sphb->index * SPAPR_PCI_WINDOW_SPACING;
1337        sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
1338        sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
1339    }
1340
1341    if (sphb->buid == (uint64_t)-1) {
1342        error_setg(errp, "BUID not specified for PHB");
1343        return;
1344    }
1345
1346    if ((sphb->dma_liobn[0] == (uint32_t)-1) ||
1347        ((sphb->dma_liobn[1] == (uint32_t)-1) && (windows_supported > 1))) {
1348        error_setg(errp, "LIOBN(s) not specified for PHB");
1349        return;
1350    }
1351
1352    if (sphb->mem_win_addr == (hwaddr)-1) {
1353        error_setg(errp, "Memory window address not specified for PHB");
1354        return;
1355    }
1356
1357    if (sphb->io_win_addr == (hwaddr)-1) {
1358        error_setg(errp, "IO window address not specified for PHB");
1359        return;
1360    }
1361
1362    if (spapr_pci_find_phb(spapr, sphb->buid)) {
1363        error_setg(errp, "PCI host bridges must have unique BUIDs");
1364        return;
1365    }
1366
1367    sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
1368
1369    namebuf = alloca(strlen(sphb->dtbusname) + 32);
1370
1371    /* Initialize memory regions */
1372    sprintf(namebuf, "%s.mmio", sphb->dtbusname);
1373    memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
1374
1375    sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
1376    memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
1377                             namebuf, &sphb->memspace,
1378                             SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
1379    memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
1380                                &sphb->memwindow);
1381
1382    /* Initialize IO regions */
1383    sprintf(namebuf, "%s.io", sphb->dtbusname);
1384    memory_region_init(&sphb->iospace, OBJECT(sphb),
1385                       namebuf, SPAPR_PCI_IO_WIN_SIZE);
1386
1387    sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
1388    memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
1389                             &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
1390    memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
1391                                &sphb->iowindow);
1392
1393    bus = pci_register_bus(dev, NULL,
1394                           pci_spapr_set_irq, pci_spapr_map_irq, sphb,
1395                           &sphb->memspace, &sphb->iospace,
1396                           PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
1397    phb->bus = bus;
1398    qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
1399
1400    /*
1401     * Initialize PHB address space.
1402     * By default there will be at least one subregion for default
1403     * 32bit DMA window.
1404     * Later the guest might want to create another DMA window
1405     * which will become another memory subregion.
1406     */
1407    sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
1408
1409    memory_region_init(&sphb->iommu_root, OBJECT(sphb),
1410                       namebuf, UINT64_MAX);
1411    address_space_init(&sphb->iommu_as, &sphb->iommu_root,
1412                       sphb->dtbusname);
1413
1414    /*
1415     * As MSI/MSIX interrupts are triggered by writes to the MSI/MSIX
1416     * vectors, we need to allocate some memory to catch those writes
1417     * coming from msi_notify()/msix_notify().
1418     * As MSIMessage:addr is always the same and MSIMessage:data is
1419     * a VIRQ number, only 4 bytes of the MSI MR will actually be
1420     * used.
1421     *
1422     * For KVM we want to ensure that this memory is a full page so that
1423     * our memory slot is of page size granularity.
1424     */
1425#ifdef CONFIG_KVM
1426    if (kvm_enabled()) {
1427        msi_window_size = getpagesize();
1428    }
1429#endif
1430
1431    memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
1432                          "msi", msi_window_size);
1433    memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
1434                                &sphb->msiwindow);
1435
1436    pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
1437
1438    pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
1439
1440    QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
1441
1442    /* Initialize the LSI table */
1443    for (i = 0; i < PCI_NUM_PINS; i++) {
1444        uint32_t irq;
1445        Error *local_err = NULL;
1446
1447        irq = xics_spapr_alloc_block(spapr->xics, 0, 1, true, false,
1448                                     &local_err);
1449        if (local_err) {
1450            error_propagate(errp, local_err);
1451            error_prepend(errp, "can't allocate LSIs: ");
1452            return;
1453        }
1454
1455        sphb->lsi_table[i].irq = irq;
1456    }
1457
1458    /* allocate connectors for child PCI devices */
1459    if (sphb->dr_enabled) {
1460        for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
1461            spapr_dr_connector_new(OBJECT(phb),
1462                                   SPAPR_DR_CONNECTOR_TYPE_PCI,
1463                                   (sphb->index << 16) | i);
1464        }
1465    }
1466
1467    /* DMA setup */
1468    for (i = 0; i < windows_supported; ++i) {
1469        tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]);
1470        if (!tcet) {
1471            error_setg(errp, "Creating window#%d failed for %s",
1472                       i, sphb->dtbusname);
1473            return;
1474        }
1475        memory_region_add_subregion_overlap(&sphb->iommu_root, 0,
1476                                            spapr_tce_get_iommu(tcet), 0);
1477    }
1478
1479    sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
1480}
1481
1482static int spapr_phb_children_reset(Object *child, void *opaque)
1483{
1484    DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
1485
1486    if (dev) {
1487        device_reset(dev);
1488    }
1489
1490    return 0;
1491}
1492
1493void spapr_phb_dma_reset(sPAPRPHBState *sphb)
1494{
1495    int i;
1496    sPAPRTCETable *tcet;
1497
1498    for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) {
1499        tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]);
1500
1501        if (tcet && tcet->nb_table) {
1502            spapr_tce_table_disable(tcet);
1503        }
1504    }
1505
1506    /* Register default 32bit DMA window */
1507    tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]);
1508    spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr,
1509                           sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT);
1510}
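
/*
 * With the default properties below (dma_win_addr 0, dma_win_size 1GB)
 * and 4K TCE pages (SPAPR_TCE_PAGE_SHIFT == 12, the usual value), this
 * re-enables a 32bit window of 0x40000000 >> 12 == 262144 TCE entries.
 */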
1511
1512static void spapr_phb_reset(DeviceState *qdev)
1513{
1514    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
1515
1516    spapr_phb_dma_reset(sphb);
1517
1518    /* Reset the IOMMU state */
1519    object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
1520
1521    if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) {
1522        spapr_phb_vfio_reset(qdev);
1523    }
1524}
1525
1526static Property spapr_phb_properties[] = {
1527    DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
1528    DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
1529    DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn[0], -1),
1530    DEFINE_PROP_UINT32("liobn64", sPAPRPHBState, dma_liobn[1], -1),
1531    DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
1532    DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
1533                       SPAPR_PCI_MMIO_WIN_SIZE),
1534    DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
1535    DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
1536                       SPAPR_PCI_IO_WIN_SIZE),
1537    DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
1538                     true),
1539    /* Default DMA window is 0..1GB */
1540    DEFINE_PROP_UINT64("dma_win_addr", sPAPRPHBState, dma_win_addr, 0),
1541    DEFINE_PROP_UINT64("dma_win_size", sPAPRPHBState, dma_win_size, 0x40000000),
1542    DEFINE_PROP_UINT64("dma64_win_addr", sPAPRPHBState, dma64_win_addr,
1543                       0x800000000000000ULL),
1544    DEFINE_PROP_BOOL("ddw", sPAPRPHBState, ddw_enabled, true),
1545    DEFINE_PROP_UINT64("pgsz", sPAPRPHBState, page_size_mask,
1546                       (1ULL << 12) | (1ULL << 16)),
1547    DEFINE_PROP_END_OF_LIST(),
1548};
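
    /*
     * A minimal usage sketch (illustrative values only): an extra PHB can
     * be added on the command line with
     *
     *   -device spapr-pci-host-bridge,index=1
     *
     * When only "index" is set, the realize code derives the BUID, the
     * LIOBNs and the MMIO/IO window addresses from it, and the remaining
     * properties keep the defaults listed above unless overridden
     * explicitly.
     */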
1549
1550static const VMStateDescription vmstate_spapr_pci_lsi = {
1551    .name = "spapr_pci/lsi",
1552    .version_id = 1,
1553    .minimum_version_id = 1,
1554    .fields = (VMStateField[]) {
1555        VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),
1556
1557        VMSTATE_END_OF_LIST()
1558    },
1559};
1560
1561static const VMStateDescription vmstate_spapr_pci_msi = {
1562    .name = "spapr_pci/msi",
1563    .version_id = 1,
1564    .minimum_version_id = 1,
1565    .fields = (VMStateField []) {
1566        VMSTATE_UINT32(key, spapr_pci_msi_mig),
1567        VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
1568        VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
1569        VMSTATE_END_OF_LIST()
1570    },
1571};
1572
1573static void spapr_pci_pre_save(void *opaque)
1574{
1575    sPAPRPHBState *sphb = opaque;
1576    GHashTableIter iter;
1577    gpointer key, value;
1578    int i;
1579
1580    g_free(sphb->msi_devs);
1581    sphb->msi_devs = NULL;
1582    sphb->msi_devs_num = g_hash_table_size(sphb->msi);
1583    if (!sphb->msi_devs_num) {
1584        return;
1585    }
1586    sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
1587
1588    g_hash_table_iter_init(&iter, sphb->msi);
1589    for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
1590        sphb->msi_devs[i].key = *(uint32_t *) key;
1591        sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
1592    }
1593}
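
    /*
     * GHashTable has no VMState description of its own, so pre_save
     * flattens sphb->msi into the plain msi_devs[] array that the
     * VMSTATE_STRUCT_VARRAY_ALLOC field below actually migrates.
     */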
1594
1595static int spapr_pci_post_load(void *opaque, int version_id)
1596{
1597    sPAPRPHBState *sphb = opaque;
1598    gpointer key, value;
1599    int i;
1600
1601    for (i = 0; i < sphb->msi_devs_num; ++i) {
1602        key = g_memdup(&sphb->msi_devs[i].key,
1603                       sizeof(sphb->msi_devs[i].key));
1604        value = g_memdup(&sphb->msi_devs[i].value,
1605                         sizeof(sphb->msi_devs[i].value));
1606        g_hash_table_insert(sphb->msi, key, value);
1607    }
1608    g_free(sphb->msi_devs);
1609    sphb->msi_devs = NULL;
1610    sphb->msi_devs_num = 0;
1611
1612    return 0;
1613}
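
    /*
     * post_load does the reverse: each migrated {key, value} pair is
     * duplicated back into the hash table, which owns the copies through
     * the g_free destroy functions it was created with, and the temporary
     * msi_devs[] array is then released.
     */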
1614
1615static const VMStateDescription vmstate_spapr_pci = {
1616    .name = "spapr_pci",
1617    .version_id = 2,
1618    .minimum_version_id = 2,
1619    .pre_save = spapr_pci_pre_save,
1620    .post_load = spapr_pci_post_load,
1621    .fields = (VMStateField[]) {
1622        VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
1623        VMSTATE_UINT32_EQUAL(dma_liobn[0], sPAPRPHBState),
1624        VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
1625        VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
1626        VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
1627        VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
1628        VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
1629                             vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
1630        VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
1631        VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
1632                                    vmstate_spapr_pci_msi, spapr_pci_msi_mig),
1633        VMSTATE_END_OF_LIST()
1634    },
1635};
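
    /*
     * The *_EQUAL fields above are sent only so that the destination can
     * check them against its own configuration: migration fails if the
     * destination PHB was created with a different BUID, LIOBN or window
     * layout, which is presumably the safe choice for values the guest has
     * already seen in its device tree.
     */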
1636
1637static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
1638                                           PCIBus *rootbus)
1639{
1640    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
1641
1642    return sphb->dtbusname;
1643}
1644
1645static void spapr_phb_class_init(ObjectClass *klass, void *data)
1646{
1647    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
1648    DeviceClass *dc = DEVICE_CLASS(klass);
1649    HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
1650
1651    hc->root_bus_path = spapr_phb_root_bus_path;
1652    dc->realize = spapr_phb_realize;
1653    dc->props = spapr_phb_properties;
1654    dc->reset = spapr_phb_reset;
1655    dc->vmsd = &vmstate_spapr_pci;
1656    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
1657    hp->plug = spapr_phb_hot_plug_child;
1658    hp->unplug = spapr_phb_hot_unplug_child;
1659}
1660
1661static const TypeInfo spapr_phb_info = {
1662    .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
1663    .parent        = TYPE_PCI_HOST_BRIDGE,
1664    .instance_size = sizeof(sPAPRPHBState),
1665    .class_init    = spapr_phb_class_init,
1666    .interfaces    = (InterfaceInfo[]) {
1667        { TYPE_HOTPLUG_HANDLER },
1668        { }
1669    }
1670};
1671
1672PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
1673{
1674    DeviceState *dev;
1675
1676    dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
1677    qdev_prop_set_uint32(dev, "index", index);
1678    qdev_init_nofail(dev);
1679
1680    return PCI_HOST_BRIDGE(dev);
1681}
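
    /*
     * A minimal caller sketch (hypothetical surrounding code, for
     * illustration only):
     *
     *   PCIHostState *phb = spapr_create_phb(spapr, 0);
     *   pci_create_simple(phb->bus, -1, "virtio-scsi-pci");
     *
     * Each PHB must use a distinct "index", since the index determines the
     * BUID, the LIOBNs and the window addresses assigned at realize time.
     */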
1682
1683typedef struct sPAPRFDT {
1684    void *fdt;
1685    int node_off;
1686    sPAPRPHBState *sphb;
1687} sPAPRFDT;
1688
1689static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev,
1690                                          void *opaque)
1691{
1692    PCIBus *sec_bus;
1693    sPAPRFDT *p = opaque;
1694    int offset;
1695    sPAPRFDT s_fdt;
1696
1697    offset = spapr_create_pci_child_dt(p->sphb, pdev, p->fdt, p->node_off);
1698    if (!offset) {
1699        error_report("Failed to create pci child device tree node");
1700        return;
1701    }
1702
1703    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
1704         PCI_HEADER_TYPE_BRIDGE)) {
1705        return;
1706    }
1707
1708    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
1709    if (!sec_bus) {
1710        return;
1711    }
1712
1713    s_fdt.fdt = p->fdt;
1714    s_fdt.node_off = offset;
1715    s_fdt.sphb = p->sphb;
1716    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
1717                        spapr_populate_pci_devices_dt,
1718                        &s_fdt);
1719}
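
    /*
     * The traversal above is depth-first: when a device is a PCI-PCI
     * bridge, the node just created for it becomes the parent offset of a
     * nested pci_for_each_device() pass over its secondary bus, so the
     * generated device tree mirrors the bus topology.
     */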
1720
1721static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
1722                                           void *opaque)
1723{
1724    unsigned int *bus_no = opaque;
1725    unsigned int primary = *bus_no;
1726    unsigned int subordinate = 0xff;
1727    PCIBus *sec_bus = NULL;
1728
1729    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
1730         PCI_HEADER_TYPE_BRIDGE)) {
1731        return;
1732    }
1733
1734    (*bus_no)++;
1735    pci_default_write_config(pdev, PCI_PRIMARY_BUS, primary, 1);
1736    pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1);
1737    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
1738
1739    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
1740    if (!sec_bus) {
1741        return;
1742    }
1743
1744    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, subordinate, 1);
1745    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
1746                        spapr_phb_pci_enumerate_bridge, bus_no);
1747    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
1748}
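
    /*
     * Bus numbering is likewise depth-first.  A bridge's primary and
     * secondary bus numbers are final as soon as they are written; the
     * subordinate number is temporarily forced to 0xff so that config
     * cycles can reach everything behind the bridge while the recursion
     * runs, then rewritten with the highest bus number actually found.
     * For example, a bridge with one nested bridge behind it ends up with
     * primary=0, secondary=1, subordinate=2.
     */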
1749
1750static void spapr_phb_pci_enumerate(sPAPRPHBState *phb)
1751{
1752    PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
1753    unsigned int bus_no = 0;
1754
1755    pci_for_each_device(bus, pci_bus_num(bus),
1756                        spapr_phb_pci_enumerate_bridge,
1757                        &bus_no);
1758
1759}
1760
1761int spapr_populate_pci_dt(sPAPRPHBState *phb,
1762                          uint32_t xics_phandle,
1763                          void *fdt)
1764{
1765    int bus_off, i, j, ret;
1766    char nodename[FDT_NAME_MAX];
1767    uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
1768    const uint64_t mmiosize = memory_region_size(&phb->memwindow);
1769    const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
1770    const uint64_t w32size = MIN(w32max, mmiosize);
1771    const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
1772    struct {
1773        uint32_t hi;
1774        uint64_t child;
1775        uint64_t parent;
1776        uint64_t size;
1777    } QEMU_PACKED ranges[] = {
1778        {
1779            cpu_to_be32(b_ss(1)), cpu_to_be64(0),
1780            cpu_to_be64(phb->io_win_addr),
1781            cpu_to_be64(memory_region_size(&phb->iospace)),
1782        },
1783        {
1784            cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
1785            cpu_to_be64(phb->mem_win_addr),
1786            cpu_to_be64(w32size),
1787        },
1788        {
1789            cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
1790            cpu_to_be64(phb->mem_win_addr + w32size),
1791            cpu_to_be64(w64size)
1792        },
1793    };
1794    const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
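        /*
         * ranges[] therefore advertises, in order: the IO window, the
         * 32-bit MMIO window remapped to bus address
         * SPAPR_PCI_MEM_WIN_BUS_OFFSET, and, only when the MMIO window is
         * larger than what fits below 4GiB of bus address space, a 64-bit
         * MMIO window starting at bus address 1ULL << 32; sizeof_ranges
         * trims the property to two entries when w64size is zero.
         */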
1795    uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
1796    uint32_t interrupt_map_mask[] = {
1797        cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
1798    uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
1799    uint32_t ddw_applicable[] = {
1800        cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW),
1801        cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW),
1802        cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW)
1803    };
1804    uint32_t ddw_extensions[] = {
1805        cpu_to_be32(1),
1806        cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW)
1807    };
1808    sPAPRTCETable *tcet;
1809    PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
1810    sPAPRFDT s_fdt;
1811
1812    /* Start populating the FDT */
1813    snprintf(nodename, FDT_NAME_MAX, "pci@%" PRIx64, phb->buid);
1814    bus_off = fdt_add_subnode(fdt, 0, nodename);
1815    if (bus_off < 0) {
1816        return bus_off;
1817    }
1818
1819    /* Write PHB properties */
1820    _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
1821    _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
1822    _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
1823    _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
1824    _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
1825    _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
1826    _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
1827    _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
1828    _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
1829    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
1830    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS_SPAPR));
1831
1832    /* Dynamic DMA window */
1833    if (phb->ddw_enabled) {
1834        _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable,
1835                         sizeof(ddw_applicable)));
1836        _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions",
1837                         &ddw_extensions, sizeof(ddw_extensions)));
1838    }
1839
1840    /* Build the interrupt-map; this must match what is done
1841     * in pci_spapr_map_irq
1842     */
1843    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
1844                     &interrupt_map_mask, sizeof(interrupt_map_mask)));
1845    for (i = 0; i < PCI_SLOT_MAX; i++) {
1846        for (j = 0; j < PCI_NUM_PINS; j++) {
1847            uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
1848            int lsi_num = pci_spapr_swizzle(i, j);
1849
1850            irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
1851            irqmap[1] = 0;
1852            irqmap[2] = 0;
1853            irqmap[3] = cpu_to_be32(j+1);
1854            irqmap[4] = cpu_to_be32(xics_phandle);
1855            irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
1856            irqmap[6] = cpu_to_be32(0x8);
1857        }
1858    }
1859    /* Write interrupt map */
1860    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
1861                     sizeof(interrupt_map)));
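        /*
         * Each entry is 7 cells: the 3-cell unit address (only the
         * device/slot bits are significant, per interrupt-map-mask), the
         * 1-cell pin number (1-4 for INTA..INTD), then the XICS phandle
         * followed by its 2-cell interrupt specifier: the swizzled LSI
         * source number and a sense/trigger cell.
         */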
1862
1863    tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]);
1864    if (!tcet) {
1865        return -1;
1866    }
1867    spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
1868                 tcet->liobn, tcet->bus_offset,
1869                 tcet->nb_table << tcet->page_shift);
1870
1871    /* Walk the bridges and program the bus numbers */
1872    spapr_phb_pci_enumerate(phb);
1873    _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
1874
1875    /* Populate tree nodes with PCI devices attached */
1876    s_fdt.fdt = fdt;
1877    s_fdt.node_off = bus_off;
1878    s_fdt.sphb = phb;
1879    pci_for_each_device(bus, pci_bus_num(bus),
1880                        spapr_populate_pci_devices_dt,
1881                        &s_fdt);
1882
1883    ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
1884                                SPAPR_DR_CONNECTOR_TYPE_PCI);
1885    if (ret) {
1886        return ret;
1887    }
1888
1889    return 0;
1890}
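
    /*
     * For illustration only (the unit address depends on the
     * "buid"/"index" properties), the default PHB typically ends up in the
     * guest device tree as something like:
     *
     *   pci@800000020000000 {
     *           device_type = "pci";
     *           compatible = "IBM,Logical_PHB";
     *           #address-cells = <3>;
     *           #size-cells = <2>;
     *           bus-range = <0x0 0xff>;
     *           ...
     *   };
     */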
1891
1892void spapr_pci_rtas_init(void)
1893{
1894    spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
1895                        rtas_read_pci_config);
1896    spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
1897                        rtas_write_pci_config);
1898    spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
1899                        rtas_ibm_read_pci_config);
1900    spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
1901                        rtas_ibm_write_pci_config);
1902    if (msi_nonbroken) {
1903        spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
1904                            "ibm,query-interrupt-source-number",
1905                            rtas_ibm_query_interrupt_source_number);
1906        spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
1907                            rtas_ibm_change_msi);
1908    }
1909
1910    spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
1911                        "ibm,set-eeh-option",
1912                        rtas_ibm_set_eeh_option);
1913    spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
1914                        "ibm,get-config-addr-info2",
1915                        rtas_ibm_get_config_addr_info2);
1916    spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
1917                        "ibm,read-slot-reset-state2",
1918                        rtas_ibm_read_slot_reset_state2);
1919    spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
1920                        "ibm,set-slot-reset",
1921                        rtas_ibm_set_slot_reset);
1922    spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
1923                        "ibm,configure-pe",
1924                        rtas_ibm_configure_pe);
1925    spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
1926                        "ibm,slot-error-detail",
1927                        rtas_ibm_slot_error_detail);
1928}
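
    /*
     * The config-space and EEH related RTAS calls are registered
     * unconditionally; the MSI ones are only registered when msi_nonbroken
     * is set, so on setups without working MSI the guest is limited to
     * LSIs.
     */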
1929
1930static void spapr_pci_register_types(void)
1931{
1932    type_register_static(&spapr_phb_info);
1933}
1934
1935type_init(spapr_pci_register_types)
1936
1937static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
1938{
1939    bool be = *(bool *)opaque;
1940
1941    if (object_dynamic_cast(OBJECT(dev), "VGA")
1942        || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
1943        object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
1944                                 &error_abort);
1945    }
1946    return 0;
1947}
1948
1949void spapr_pci_switch_vga(bool big_endian)
1950{
1951    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1952    sPAPRPHBState *sphb;
1953
1954    /*
1955     * For backward compatibility with existing guests, we switch
1956     * the endianness of the VGA controller when changing the guest
1957     * interrupt mode
1958     */
1959    QLIST_FOREACH(sphb, &spapr->phbs, list) {
1960        BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
1961        qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
1962                           &big_endian);
1963    }
1964}
1965