/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
   9
  10#include "qemu/osdep.h"
  11#include "qemu/log.h"
  12#include "qemu/error-report.h"
  13#include "qapi/error.h"
  14#include "hw/ppc/spapr.h"
  15#include "hw/ppc/spapr_cpu_core.h"
  16#include "hw/ppc/spapr_xive.h"
  17#include "hw/ppc/xics.h"
  18#include "hw/ppc/xics_spapr.h"
  19#include "cpu-models.h"
  20#include "sysemu/kvm.h"
  21
  22#include "trace.h"
  23
  24void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
  25{
  26    spapr->irq_map_nr = nr_msis;
  27    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
  28}
  29
  30int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
  31                        Error **errp)
  32{
  33    int irq;
  34
  35    /*
  36     * The 'align_mask' parameter of bitmap_find_next_zero_area()
  37     * should be one less than a power of 2; 0 means no
  38     * alignment. Adapt the 'align' value of the former allocator
  39     * to fit the requirements of bitmap_find_next_zero_area()
  40     */
  41    align -= 1;
  42
  43    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
  44                                     align);
  45    if (irq == spapr->irq_map_nr) {
  46        error_setg(errp, "can't find a free %d-IRQ block", num);
  47        return -1;
  48    }
  49
  50    bitmap_set(spapr->irq_map, irq, num);
  51
  52    return irq + SPAPR_IRQ_MSI;
  53}
  54
  55void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
  56{
  57    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
  58}
  59
  60void spapr_irq_msi_reset(SpaprMachineState *spapr)
  61{
  62    bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
  63}
  64
  65
  66/*
  67 * XICS IRQ backend.
  68 */
  69
  70static ICSState *spapr_ics_create(SpaprMachineState *spapr,
  71                                  int nr_irqs, Error **errp)
  72{
  73    Error *local_err = NULL;
  74    Object *obj;
  75
  76    obj = object_new(TYPE_ICS_SIMPLE);
  77    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
  78    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
  79                                   &error_abort);
  80    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
  81    if (local_err) {
  82        goto error;
  83    }
  84    object_property_set_bool(obj, true, "realized", &local_err);
  85    if (local_err) {
  86        goto error;
  87    }
  88
  89    return ICS_BASE(obj);
  90
  91error:
  92    error_propagate(errp, local_err);
  93    return NULL;
  94}
  95
/*
 * Initialize the XICS IRQ backend: try the in-kernel (KVM) XICS when
 * allowed, fall back to the emulated one, then create the source
 * controller.
 */
static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;
    bool xics_kvm = false;

    if (kvm_enabled()) {
        /* Probe the in-kernel XICS first if the user allows it */
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, &local_err)) {
            xics_kvm = true;
        }
        /* kernel-irqchip=on makes the in-kernel XICS mandatory */
        if (machine_kernel_irqchip_required(machine) && !xics_kvm) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            goto error;
        }
        /* Best-effort mode: discard the probe error and fall back to TCG */
        error_free(local_err);
        local_err = NULL;
    }

    if (!xics_kvm) {
        /* Register the XICS hypercalls for the emulated backend */
        xics_spapr_init(spapr);
    }

    spapr->ics = spapr_ics_create(spapr, nr_irqs, &local_err);

error:
    error_propagate(errp, local_err);
}
 126
 127#define ICS_IRQ_FREE(ics, srcno)   \
 128    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
 129
 130static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
 131                                Error **errp)
 132{
 133    ICSState *ics = spapr->ics;
 134
 135    assert(ics);
 136
 137    if (!ics_valid_irq(ics, irq)) {
 138        error_setg(errp, "IRQ %d is invalid", irq);
 139        return -1;
 140    }
 141
 142    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
 143        error_setg(errp, "IRQ %d is not free", irq);
 144        return -1;
 145    }
 146
 147    ics_set_irq_type(ics, irq - ics->offset, lsi);
 148    return 0;
 149}
 150
 151static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq, int num)
 152{
 153    ICSState *ics = spapr->ics;
 154    uint32_t srcno = irq - ics->offset;
 155    int i;
 156
 157    if (ics_valid_irq(ics, irq)) {
 158        trace_spapr_irq_free(0, irq, num);
 159        for (i = srcno; i < srcno + num; ++i) {
 160            if (ICS_IRQ_FREE(ics, i)) {
 161                trace_spapr_irq_free_warn(0, i);
 162            }
 163            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
 164        }
 165    }
 166}
 167
 168static qemu_irq spapr_qirq_xics(SpaprMachineState *spapr, int irq)
 169{
 170    ICSState *ics = spapr->ics;
 171    uint32_t srcno = irq - ics->offset;
 172
 173    if (ics_valid_irq(ics, irq)) {
 174        return spapr->qirqs[srcno];
 175    }
 176
 177    return NULL;
 178}
 179
 180static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
 181{
 182    CPUState *cs;
 183
 184    CPU_FOREACH(cs) {
 185        PowerPCCPU *cpu = POWERPC_CPU(cs);
 186
 187        icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
 188    }
 189
 190    ics_pic_print_info(spapr->ics, mon);
 191}
 192
 193static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
 194                                           PowerPCCPU *cpu, Error **errp)
 195{
 196    Error *local_err = NULL;
 197    Object *obj;
 198    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
 199
 200    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
 201                     &local_err);
 202    if (local_err) {
 203        error_propagate(errp, local_err);
 204        return;
 205    }
 206
 207    spapr_cpu->icp = ICP(obj);
 208}
 209
 210static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
 211{
 212    if (!kvm_irqchip_in_kernel()) {
 213        CPUState *cs;
 214        CPU_FOREACH(cs) {
 215            PowerPCCPU *cpu = POWERPC_CPU(cs);
 216            icp_resend(spapr_cpu_state(cpu)->icp);
 217        }
 218    }
 219    return 0;
 220}
 221
 222static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
 223{
 224    SpaprMachineState *spapr = opaque;
 225
 226    ics_simple_set_irq(spapr->ics, srcno, val);
 227}
 228
/* Machine reset hook for the XICS backend: currently a no-op. */
static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
{
    /* TODO: create the KVM XICS device */
}
 233
/* Device tree node name of the XICS interrupt controller. */
static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr)
{
    return XICS_NODENAME;
}
 238
 239#define SPAPR_IRQ_XICS_NR_IRQS     0x1000
 240#define SPAPR_IRQ_XICS_NR_MSIS     \
 241    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)
 242
/* XICS-only IRQ backend: legacy interrupt mode advertised through OV5. */
SpaprIrq spapr_irq_xics = {
    .nr_irqs     = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
};
 260
 261/*
 262 * XIVE IRQ backend.
 263 */
/*
 * Initialize the XIVE IRQ backend: create the sPAPR XIVE device,
 * claim one IPI per possible CPU and register the XIVE hypercalls.
 */
static void spapr_irq_init_xive(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    /* KVM XIVE device not yet available */
    if (kvm_enabled()) {
        if (machine_kernel_irqchip_required(machine)) {
            error_setg(errp, "kernel_irqchip requested. no KVM XIVE support");
            return;
        }
    }

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs: one IRQ per possible interrupt server */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    /* Register the XIVE exploitation mode hypercalls */
    spapr_xive_hcall_init(spapr);
}
 297
 298static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
 299                                Error **errp)
 300{
 301    if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
 302        error_setg(errp, "IRQ %d is invalid", irq);
 303        return -1;
 304    }
 305    return 0;
 306}
 307
 308static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq, int num)
 309{
 310    int i;
 311
 312    for (i = irq; i < irq + num; ++i) {
 313        spapr_xive_irq_free(spapr->xive, i);
 314    }
 315}
 316
 317static qemu_irq spapr_qirq_xive(SpaprMachineState *spapr, int irq)
 318{
 319    SpaprXive *xive = spapr->xive;
 320
 321    if (irq >= xive->nr_irqs) {
 322        return NULL;
 323    }
 324
 325    /* The sPAPR machine/device should have claimed the IRQ before */
 326    assert(xive_eas_is_valid(&xive->eat[irq]));
 327
 328    return spapr->qirqs[irq];
 329}
 330
 331static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
 332                                      Monitor *mon)
 333{
 334    CPUState *cs;
 335
 336    CPU_FOREACH(cs) {
 337        PowerPCCPU *cpu = POWERPC_CPU(cs);
 338
 339        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
 340    }
 341
 342    spapr_xive_pic_print_info(spapr->xive, mon);
 343}
 344
 345static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
 346                                           PowerPCCPU *cpu, Error **errp)
 347{
 348    Error *local_err = NULL;
 349    Object *obj;
 350    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
 351
 352    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
 353    if (local_err) {
 354        error_propagate(errp, local_err);
 355        return;
 356    }
 357
 358    spapr_cpu->tctx = XIVE_TCTX(obj);
 359
 360    /*
 361     * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
 362     * don't beneficiate from the reset of the XIVE IRQ backend
 363     */
 364    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
 365}
 366
/* Post-migration hook for XIVE: nothing to fix up, always succeeds. */
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return 0;
}
 371
 372static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
 373{
 374    CPUState *cs;
 375
 376    CPU_FOREACH(cs) {
 377        PowerPCCPU *cpu = POWERPC_CPU(cs);
 378
 379        /* (TCG) Set the OS CAM line of the thread interrupt context. */
 380        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
 381    }
 382
 383    /* Activate the XIVE MMIOs */
 384    spapr_xive_mmio_set_enabled(spapr->xive, true);
 385}
 386
 387static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
 388{
 389    SpaprMachineState *spapr = opaque;
 390
 391    xive_source_set_irq(&spapr->xive->source, srcno, val);
 392}
 393
/* Device tree node name stored in the XIVE device. */
static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
{
    return spapr->xive->nodename;
}
 398
 399/*
 400 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 401 * with XICS.
 402 */
 403
 404#define SPAPR_IRQ_XIVE_NR_IRQS     0x2000
 405#define SPAPR_IRQ_XIVE_NR_MSIS     (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)
 406
/* XIVE-only IRQ backend: exploitation interrupt mode advertised through OV5. */
SpaprIrq spapr_irq_xive = {
    .nr_irqs     = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_EXPLOIT,

    .init        = spapr_irq_init_xive,
    .claim       = spapr_irq_claim_xive,
    .free        = spapr_irq_free_xive,
    .qirq        = spapr_qirq_xive,
    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
    .set_irq     = spapr_irq_set_irq_xive,
    .get_nodename = spapr_irq_get_nodename_xive,
};
 424
 425/*
 426 * Dual XIVE and XICS IRQ backend.
 427 *
 428 * Both interrupt mode, XIVE and XICS, objects are created but the
 429 * machine starts in legacy interrupt mode (XICS). It can be changed
 430 * by the CAS negotiation process and, in that case, the new mode is
 431 * activated after an extra machine reset.
 432 */
 433
 434/*
 435 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 436 * default.
 437 */
 438static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
 439{
 440    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
 441        &spapr_irq_xive : &spapr_irq_xics;
 442}
 443
 444static void spapr_irq_init_dual(SpaprMachineState *spapr, int nr_irqs,
 445                                Error **errp)
 446{
 447    MachineState *machine = MACHINE(spapr);
 448    Error *local_err = NULL;
 449
 450    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
 451        error_setg(errp, "No KVM support for the 'dual' machine");
 452        return;
 453    }
 454
 455    spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err);
 456    if (local_err) {
 457        error_propagate(errp, local_err);
 458        return;
 459    }
 460
 461    spapr_irq_xive.init(spapr, spapr_irq_xive.nr_irqs, &local_err);
 462    if (local_err) {
 463        error_propagate(errp, local_err);
 464        return;
 465    }
 466}
 467
 468static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
 469                                Error **errp)
 470{
 471    Error *local_err = NULL;
 472    int ret;
 473
 474    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
 475    if (local_err) {
 476        error_propagate(errp, local_err);
 477        return ret;
 478    }
 479
 480    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
 481    if (local_err) {
 482        error_propagate(errp, local_err);
 483        return ret;
 484    }
 485
 486    return ret;
 487}
 488
/* Release the IRQ range in both backends. */
static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}
 494
/* Dispatch to the CAS-negotiated backend's qirq lookup. */
static qemu_irq spapr_qirq_dual(SpaprMachineState *spapr, int irq)
{
    return spapr_irq_current(spapr)->qirq(spapr, irq);
}
 499
/* Dispatch the monitor dump to the CAS-negotiated backend. */
static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}
 504
/* Populate the device tree using the CAS-negotiated backend. */
static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}
 511
 512static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
 513                                           PowerPCCPU *cpu, Error **errp)
 514{
 515    Error *local_err = NULL;
 516
 517    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
 518    if (local_err) {
 519        error_propagate(errp, local_err);
 520        return;
 521    }
 522
 523    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
 524}
 525
/* Post-migration hook for the dual backend. */
static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    /* Then run the negotiated backend's own post-load fixups */
    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}
 538
/* Machine reset hook for the dual backend. */
static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    spapr_irq_current(spapr)->reset(spapr, errp);
}
 549
 550static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
 551{
 552    SpaprMachineState *spapr = opaque;
 553
 554    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
 555}
 556
/* Device tree node name of whichever backend CAS selected. */
static const char *spapr_irq_get_nodename_dual(SpaprMachineState *spapr)
{
    return spapr_irq_current(spapr)->get_nodename(spapr);
}
 561
 562/*
 563 * Define values in sync with the XIVE and XICS backend
 564 */
 565#define SPAPR_IRQ_DUAL_NR_IRQS     0x2000
 566#define SPAPR_IRQ_DUAL_NR_MSIS     (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)
 567
/* Dual XICS/XIVE backend: both modes advertised, selected by CAS. */
SpaprIrq spapr_irq_dual = {
    .nr_irqs     = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_BOTH,

    .init        = spapr_irq_init_dual,
    .claim       = spapr_irq_claim_dual,
    .free        = spapr_irq_free_dual,
    .qirq        = spapr_qirq_dual,
    .print_info  = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load   = spapr_irq_post_load_dual,
    .reset       = spapr_irq_reset_dual,
    .set_irq     = spapr_irq_set_irq_dual,
    .get_nodename = spapr_irq_get_nodename_dual,
};
 585
 586
/*
 * Validate the selected IRQ backend against the machine's CPU type,
 * downgrading or rejecting configurations that cannot work.
 */
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }
}
 627
 628/*
 629 * sPAPR IRQ frontend routines for devices
 630 */
 631void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
 632{
 633    MachineState *machine = MACHINE(spapr);
 634    Error *local_err = NULL;
 635
 636    if (machine_kernel_irqchip_split(machine)) {
 637        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
 638        return;
 639    }
 640
 641    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
 642        error_setg(errp,
 643                   "kernel_irqchip requested but only available with KVM");
 644        return;
 645    }
 646
 647    spapr_irq_check(spapr, &local_err);
 648    if (local_err) {
 649        error_propagate(errp, local_err);
 650        return;
 651    }
 652
 653    /* Initialize the MSI IRQ allocator. */
 654    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
 655        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
 656    }
 657
 658    spapr->irq->init(spapr, spapr->irq->nr_irqs, errp);
 659
 660    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
 661                                      spapr->irq->nr_irqs);
 662}
 663
/* Front-end: claim an IRQ through the machine's active backend. */
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}
 668
/* Front-end: free a range of IRQs through the active backend. */
void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}
 673
/* Front-end: look up the qemu_irq line through the active backend. */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}
 678
/* Front-end: run the active backend's post-migration fixups. */
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}
 683
/* Front-end: run the active backend's reset hook, if it has one. */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    /* Some backends (e.g. xics-legacy) don't define a reset handler */
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}
 690
 691int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
 692{
 693    const char *nodename = spapr->irq->get_nodename(spapr);
 694    int offset, phandle;
 695
 696    offset = fdt_subnode_offset(fdt, 0, nodename);
 697    if (offset < 0) {
 698        error_setg(errp, "Can't find node \"%s\": %s", nodename,
 699                   fdt_strerror(offset));
 700        return -1;
 701    }
 702
 703    phandle = fdt_get_phandle(fdt, offset);
 704    if (!phandle) {
 705        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
 706        return -1;
 707    }
 708
 709    return phandle;
 710}
 711
 712/*
 713 * XICS legacy routines - to deprecate one day
 714 */
 715
 716static int ics_find_free_block(ICSState *ics, int num, int alignnum)
 717{
 718    int first, i;
 719
 720    for (first = 0; first < ics->nr_irqs; first += alignnum) {
 721        if (num > (ics->nr_irqs - first)) {
 722            return -1;
 723        }
 724        for (i = first; i < first + num; ++i) {
 725            if (!ICS_IRQ_FREE(ics, i)) {
 726                break;
 727            }
 728        }
 729        if (i == (first + num)) {
 730            return first;
 731        }
 732    }
 733
 734    return -1;
 735}
 736
/*
 * Legacy allocator: find a free block of 'num' IRQs in the XICS
 * source controller. Returns the first IRQ number (with the ICS
 * offset applied), or -1 with @errp set.
 */
int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        /* MSI only allows power-of-2 vector counts up to 32 */
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}
 766
 767#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS     0x400
 768
/*
 * Legacy XICS backend for older machine types; note it defines no
 * .reset handler (spapr_irq_reset() skips a NULL reset).
 */
SpaprIrq spapr_irq_xics_legacy = {
    .nr_irqs     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
};
 785