linux/arch/powerpc/kvm/book3s_xive.c
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"


/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed"
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima                xive_tima
#define __x_eoi_page(xd)        ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)       ((void __iomem *)((xd)->trig_mmio))
#define __x_writeb      __raw_writeb
#define __x_readw       __raw_readw
#define __x_readq       __raw_readq
#define __x_writeq      __raw_writeq

#include "book3s_xive_template.c"

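/*
 * Including the template with X_PFX set to "xive_vm_" is what
 * generates the xive_vm_esb_load() and xive_vm_source_eoi() helpers
 * used throughout the rest of this file.
 */
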
/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP      2

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
        /* This should be only for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return false;

        /* Those interrupts should always have a trigger page */
        if (WARN_ON(!xd->trig_mmio))
                return false;

        out_be64(xd->trig_mmio, 0);

        return true;
}

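/*
 * Escalation interrupt handler, run on the host: flag the target
 * vCPU as having an interrupt pending and kick it if it is ceded,
 * so the guest re-enters and notices the queued event.
 */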
static irqreturn_t xive_esc_irq(int irq, void *data)
{
        struct kvm_vcpu *vcpu = data;

        vcpu->arch.irq_pending = 1;
        smp_mb();
        if (vcpu->arch.ceded)
                kvmppc_fast_vcpu_kick(vcpu);

        /* Since we have the no-EOI flag, the interrupt is effectively
         * disabled now. Clearing xive_esc_on means we won't bother
         * doing so on the next entry.
         *
         * This also allows the entry code to know that if a PQ combination
         * of 10 is observed while xive_esc_on is true, it means the queue
         * contains an unprocessed escalation interrupt. We don't make use of
         * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
         */
        vcpu->arch.xive_esc_on = false;

        return IRQ_HANDLED;
}

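/*
 * Map and request the escalation interrupt for one queue of a vCPU.
 * In single escalation mode only one such interrupt exists per vCPU
 * and it is left effectively masked after its first fire; see the
 * comment inside the function.
 */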
static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q = &xc->queues[prio];
        char *name = NULL;
        int rc;

        /* Already there ? */
        if (xc->esc_virq[prio])
                return 0;

        /* Hook up the escalation interrupt */
        xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
        if (!xc->esc_virq[prio]) {
                pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                return -EIO;
        }

        if (xc->xive->single_escalation)
                name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
                                 vcpu->kvm->arch.lpid, xc->server_num);
        else
                name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
                                 vcpu->kvm->arch.lpid, xc->server_num, prio);
        if (!name) {
                pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                rc = -ENOMEM;
                goto error;
        }

        pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

        rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
                         IRQF_NO_THREAD, name, vcpu);
        if (rc) {
                pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                goto error;
        }
        xc->esc_virq_names[prio] = name;

        /* In single escalation mode, we grab the ESB MMIO of the
         * interrupt and mask it. Also populate the VCPU v/raddr
         * of the ESB page for use by asm entry/exit code. Finally
         * set the XIVE_IRQ_NO_EOI flag which will prevent the
         * core code from performing an EOI on the escalation
         * interrupt, thus leaving it effectively masked after
         * it fires once.
         */
        if (xc->xive->single_escalation) {
                struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
                struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
                vcpu->arch.xive_esc_raddr = xd->eoi_page;
                vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
                xd->flags |= XIVE_IRQ_NO_EOI;
        }

        return 0;
error:
        irq_dispose_mapping(xc->esc_virq[prio]);
        xc->esc_virq[prio] = 0;
        kfree(name);
        return rc;
}

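/*
 * Allocate and configure the event queue page for one priority of a
 * vCPU. The queue only becomes visible (q->qpage non-NULL) once it
 * has been fully configured, which the prio 0 fast path relies on.
 */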
static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        struct xive_q *q = &xc->queues[prio];
        void *qpage;
        int rc;

        if (WARN_ON(q->qpage))
                return 0;

        /* Allocate the queue and retrieve infos on current node for now */
        qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
        if (!qpage) {
                pr_err("Failed to allocate queue %d for VCPU %d\n",
                       prio, xc->server_num);
                return -ENOMEM;
        }
        memset(qpage, 0, 1 << xive->q_order);

        /*
         * Reconfigure the queue. This will set q->qpage only once the
         * queue is fully configured. This is a requirement for prio 0
         * as we will stop doing EOIs for every IPI as soon as we observe
         * qpage being non-NULL, and instead will only EOI when we receive
         * corresponding queue 0 entries
         */
        rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
                                         xive->q_order, true);
        if (rc)
                pr_err("Failed to configure queue %d for VCPU %d\n",
                       prio, xc->server_num);
        return rc;
}

/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvm_vcpu *vcpu;
        int i, rc;

        lockdep_assert_held(&kvm->lock);

        /* Already provisioned ? */
        if (xive->qmap & (1 << prio))
                return 0;

        pr_devel("Provisioning prio... %d\n", prio);

        /* Provision each VCPU and enable escalations if needed */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!vcpu->arch.xive_vcpu)
                        continue;
                rc = xive_provision_queue(vcpu, prio);
                if (rc == 0 && !xive->single_escalation)
                        xive_attach_escalation(vcpu, prio);
                if (rc)
                        return rc;
        }

        /* Order previous stores and mark it as provisioned */
        mb();
        xive->qmap |= (1 << prio);
        return 0;
}

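/*
 * Account one "pending" entry against the queue an interrupt is being
 * retargeted away from; the count is folded back into the queue count
 * once that queue has been observed empty (see the targeting rules
 * comment further down).
 */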
static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
        struct kvm_vcpu *vcpu;
        struct kvmppc_xive_vcpu *xc;
        struct xive_q *q;

        /* Locate target server */
        vcpu = kvmppc_xive_find_server(kvm, server);
        if (!vcpu) {
                pr_warn("%s: Can't find server %d\n", __func__, server);
                return;
        }
        xc = vcpu->arch.xive_vcpu;
        if (WARN_ON(!xc))
                return;

        q = &xc->queues[prio];
        atomic_inc(&q->pending_count);
}

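/*
 * Try to reserve a slot in the given vCPU/priority queue. As a rough
 * illustration (assuming q->msk is the usual entries-minus-one index
 * mask): a 1024-entry queue has q->msk = 1023, so at most
 * (1023 + 1) - XIVE_Q_GAP = 1022 interrupts may be accounted to it
 * before -EBUSY is returned.
 */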
static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q;
        u32 max;

        if (WARN_ON(!xc))
                return -ENXIO;
        if (!xc->valid)
                return -ENXIO;

        q = &xc->queues[prio];
        if (WARN_ON(!q->qpage))
                return -ENXIO;

        /* Calculate max number of interrupts in that queue. */
        max = (q->msk + 1) - XIVE_Q_GAP;
        return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

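/*
 * Pick a server for an interrupt: try the requested one first, then
 * fall back to any vCPU with room in the matching priority queue. On
 * success *server is updated and the chosen queue's count has already
 * been incremented.
 */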
static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
        struct kvm_vcpu *vcpu;
        int i, rc;

        /* Locate target server */
        vcpu = kvmppc_xive_find_server(kvm, *server);
        if (!vcpu) {
                pr_devel("Can't find server %d\n", *server);
                return -EINVAL;
        }

        pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

        /* Try pick it */
        rc = xive_try_pick_queue(vcpu, prio);
        if (rc == 0)
                return rc;

        pr_devel(" .. failed, looking up candidate...\n");

        /* Failed, pick another VCPU */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!vcpu->arch.xive_vcpu)
                        continue;
                rc = xive_try_pick_queue(vcpu, prio);
                if (rc == 0) {
                        *server = vcpu->arch.xive_vcpu->server_num;
                        pr_devel("  found on 0x%x/%d\n", *server, prio);
                        return rc;
                }
        }
        pr_devel("  no available target !\n");

        /* No available target ! */
        return -EBUSY;
}

static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
{
        return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}

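/*
 * Mask a source and take its lock, spinning until no H_EOI is in
 * flight. Returns the previous guest priority so the caller can
 * restore it (or stash it in saved_priority) later.
 */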
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
                             struct kvmppc_xive_src_block *sb,
                             struct kvmppc_xive_irq_state *state)
{
        struct xive_irq_data *xd;
        u32 hw_num;
        u8 old_prio;
        u64 val;

        /*
         * Take the lock, set masked, try again if racing
         * with H_EOI
         */
        for (;;) {
                arch_spin_lock(&sb->lock);
                old_prio = state->guest_priority;
                state->guest_priority = MASKED;
                mb();
                if (!state->in_eoi)
                        break;
                state->guest_priority = old_prio;
                arch_spin_unlock(&sb->lock);
        }

        /* No change ? Bail */
        if (old_prio == MASKED)
                return old_prio;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        /*
         * If the interrupt is marked as needing masking via
         * firmware, we do it here. Firmware masking however
         * is "lossy", it won't return the old p and q bits
         * and won't set the interrupt to a state where it will
         * record queued ones. If this is an issue we should do
         * lazy masking instead.
         *
         * For now, we work around this in unmask by forcing
         * an interrupt whenever we unmask a non-LSI via FW
         * (if ever).
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
                                          xive_vp(xive, state->act_server),
                                          MASKED, state->number);
                /* set old_p so we can track if an H_EOI was done */
                state->old_p = true;
                state->old_q = false;
        } else {
                /* Set PQ to 10, return old P and old Q and remember them */
                val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
                state->old_p = !!(val & 2);
                state->old_q = !!(val & 1);

                /*
                 * Synchronize hardware to ensure the queues are updated
                 * when masking
                 */
                xive_native_sync_source(hw_num);
        }

        return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
                                 struct kvmppc_xive_irq_state *state)
{
        /*
         * Take the lock, try again if racing with H_EOI
         */
        for (;;) {
                arch_spin_lock(&sb->lock);
                if (!state->in_eoi)
                        break;
                arch_spin_unlock(&sb->lock);
        }
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
                               struct kvmppc_xive_src_block *sb,
                               struct kvmppc_xive_irq_state *state,
                               u8 prio)
{
        struct xive_irq_data *xd;
        u32 hw_num;

        /* If we aren't changing a thing, move on */
        if (state->guest_priority != MASKED)
                goto bail;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        /*
         * See comment in xive_lock_and_mask() concerning masking
         * via firmware.
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
                                          xive_vp(xive, state->act_server),
                                          state->act_priority, state->number);
                /* If an EOI is needed, do it here */
                if (!state->old_p)
                        xive_vm_source_eoi(hw_num, xd);
                /* If this is not an LSI, force a trigger */
                if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
                        xive_irq_trigger(xd);
                goto bail;
        }

        /* Old Q set, set PQ to 11 */
        if (state->old_q)
                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

        /*
         * If not old P, then perform an "effective" EOI on the
         * source. This will handle the cases where FW EOI is needed.
         */
        if (!state->old_p)
                xive_vm_source_eoi(hw_num, xd);

        /* Synchronize ordering and mark unmasked */
        mb();
bail:
        state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fall back
 * to another server if necessary and perform the HW targeting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
                                 struct kvmppc_xive_irq_state *state,
                                 u32 server, u8 prio)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        u32 hw_num;
        int rc;

        /*
         * This will return a tentative server and actual
         * priority. The count for that new target will have
         * already been incremented.
         */
        rc = xive_select_target(kvm, &server, prio);

        /*
         * We failed to find a target ? Not much we can do
         * at least until we support the GIQ.
         */
        if (rc)
                return rc;

        /*
         * Increment the old queue pending count if there
         * was one so that the old queue count gets adjusted later
         * when observed to be empty.
         */
        if (state->act_priority != MASKED)
                xive_inc_q_pending(kvm,
                                   state->act_server,
                                   state->act_priority);
        /*
         * Update state and HW
         */
        state->act_priority = prio;
        state->act_server = server;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, NULL);

        return xive_native_configure_irq(hw_num,
                                         xive_vp(xive, server),
                                         prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 *  - Unless it was never enabled (or we run out of capacity)
 *    an interrupt is always targeted at a valid server/queue
 *    pair even when "masked" by the guest. This pair tends to
 *    be the last one used but it can be changed under some
 *    circumstances. That allows us to separate targeting
 *    from masking, we only handle accounting during (re)targeting,
 *    this also allows us to let an interrupt drain into its target
 *    queue after masking, avoiding complex schemes to remove
 *    interrupts out of remote processor queues.
 *
 *  - When masking, we set PQ to 10 and save the previous value
 *    of P and Q.
 *
 *  - When unmasking, if saved Q was set, we set PQ to 11
 *    otherwise we leave PQ to the HW state which will be either
 *    10 if nothing happened or 11 if the interrupt fired while
 *    masked. Effectively we are OR'ing the previous Q into the
 *    HW Q.
 *
 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *    which will unmask the interrupt and shoot a new one if Q was
 *    set.
 *
 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *    effectively meaning an H_EOI from the guest is still expected
 *    for that interrupt).
 *
 *  - If H_EOI occurs while masked, we clear the saved P.
 *
 *  - When changing target, we account on the new target and
 *    increment a separate "pending" counter on the old one.
 *    This pending counter will be used to decrement the old
 *    target's count when its queue has been observed empty.
 */

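/*
 * For quick reference while reading the rules above (a summary of the
 * generic XIVE ESB semantics; see asm/xive-regs.h for the
 * authoritative description): P means the interrupt has been
 * presented to a queue and awaits an EOI, Q records a trigger that
 * arrived while P was set. Any state other than PQ=00 inhibits
 * delivery, which is why masking sets PQ=10 and why unmasking ORs the
 * saved Q back in before doing the effective EOI.
 */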
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                         u32 priority)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u8 new_act_prio;
        int rc = 0;
        u16 idx;

        if (!xive)
                return -ENODEV;

        pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
                 irq, server, priority);

        /* First, check provisioning of queues */
        if (priority != MASKED)
                rc = xive_check_provisioning(xive->kvm,
                              xive_prio_from_guest(priority));
        if (rc) {
                pr_devel("  provisioning failure %d !\n", rc);
                return rc;
        }

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * We first handle masking/unmasking since the locking
         * might need to be retried due to EOIs, we'll handle
         * targeting changes later. These functions will return
         * with the SB lock held.
         *
         * xive_lock_and_mask() will also set state->guest_priority
         * but won't otherwise change other fields of the state.
         *
         * xive_lock_for_unmask will not actually unmask, this will
         * be done later by xive_finish_unmask() once the targeting
         * has been done, so we don't try to unmask an interrupt
         * that hasn't yet been targeted.
         */
        if (priority == MASKED)
                xive_lock_and_mask(xive, sb, state);
        else
                xive_lock_for_unmask(sb, state);

        /*
         * Then we handle targeting.
         *
         * First calculate a new "actual priority"
         */
        new_act_prio = state->act_priority;
        if (priority != MASKED)
                new_act_prio = xive_prio_from_guest(priority);

        pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
                 new_act_prio, state->act_server, state->act_priority);

        /*
         * Then check if we actually need to change anything.
         *
         * The condition for re-targeting the interrupt is that
         * we have a valid new priority (new_act_prio is not 0xff)
         * and either the server or the priority changed.
         *
         * Note: If act_priority was ff and the new priority is
         *       also ff, we don't do anything and leave the interrupt
         *       untargeted. An attempt at doing an int_on on an
         *       untargeted interrupt will fail. If that is a problem
         *       we could initialize interrupts with valid defaults
         */

        if (new_act_prio != MASKED &&
            (state->act_server != server ||
             state->act_priority != new_act_prio))
                rc = xive_target_interrupt(kvm, state, server, new_act_prio);

        /*
         * Perform the final unmasking of the interrupt source
         * if necessary
         */
        if (priority != MASKED)
                xive_finish_unmask(xive, sb, state, priority);

        /*
         * Finally, update saved_priority to match. Only int_on/off
         * set this field to a different value.
         */
        state->saved_priority = priority;

        arch_spin_unlock(&sb->lock);
        return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                         u32 *priority)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];
        arch_spin_lock(&sb->lock);
        *server = state->act_server;
        *priority = state->guest_priority;
        arch_spin_unlock(&sb->lock);

        return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        pr_devel("int_on(irq=0x%x)\n", irq);

        /*
         * Check if interrupt was not targeted
         */
        if (state->act_priority == MASKED) {
                pr_devel("int_on on untargeted interrupt\n");
                return -EINVAL;
        }

        /* If saved_priority is 0xff, do nothing */
        if (state->saved_priority == MASKED)
                return 0;

        /*
         * Lock and unmask it.
         */
        xive_lock_for_unmask(sb, state);
        xive_finish_unmask(xive, sb, state, state->saved_priority);
        arch_spin_unlock(&sb->lock);

        return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        pr_devel("int_off(irq=0x%x)\n", irq);

        /*
         * Lock and mask
         */
        state->saved_priority = xive_lock_and_mask(xive, sb, state);
        arch_spin_unlock(&sb->lock);

        return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return false;
        state = &sb->irq_state[idx];
        if (!state->valid)
                return false;

        /*
         * Trigger the IPI. This assumes we never restore a pass-through
         * interrupt which should be safe enough
         */
        xive_irq_trigger(&state->ipi_data);

        return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        if (!xc)
                return 0;

        /* Return the per-cpu state for state saving/migration */
        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
               (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
               (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

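/*
 * The ICP image handled here packs CPPR, MFRR, XISR and the pending
 * priority into a single 64-bit word via the KVM_REG_PPC_ICP_* shifts
 * (part of the XICS one-reg migration interface), so a XIVE guest can
 * be saved and restored through the same XICS-style stream.
 */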
int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        u8 cppr, mfrr;
        u32 xisr;

        if (!xc || !xive)
                return -ENOENT;

        /* Grab individual state fields. We don't use pending_pri */
        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                KVM_REG_PPC_ICP_XISR_MASK;
        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

        pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
                 xc->server_num, cppr, mfrr, xisr);

        /*
         * We can't update the state of a "pushed" VCPU, but that
         * shouldn't happen.
         */
        if (WARN_ON(vcpu->arch.xive_pushed))
                return -EIO;

        /* Update VCPU HW saved state */
        vcpu->arch.xive_saved_state.cppr = cppr;
        xc->hw_cppr = xc->cppr = cppr;

        /*
         * Update MFRR state. If it's not 0xff, we mark the VCPU as
         * having a pending MFRR change, which will re-evaluate the
         * target. The VCPU will thus potentially get a spurious
         * interrupt but that's not a big deal.
         */
        xc->mfrr = mfrr;
        if (mfrr < cppr)
                xive_irq_trigger(&xc->vp_ipi_data);

        /*
         * Now saved XIRR is "interesting". It means there's something in
         * the legacy "1 element" queue... for an IPI we simply ignore it,
         * as the MFRR restore will handle that. For anything else we need
         * to force a resend of the source.
         * However the source may not have been set up yet. If that's the
         * case, we keep that info and increment a counter in the xive to
         * tell subsequent xive_set_source() to go look.
         */
        if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
                xc->delayed_irq = xisr;
                xive->delayed_irqs++;
                pr_devel("  xisr restore delayed\n");
        }

        return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                           struct irq_desc *host_desc)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
        unsigned int host_irq = irq_desc_get_irq(host_desc);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
        u16 idx;
        u8 prio;
        int rc;

        if (!xive)
                return -ENODEV;

        pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * Mark the passed-through interrupt as going to a VCPU,
         * this will prevent further EOIs and similar operations
         * from the XIVE code. It will also mask the interrupt
         * to either PQ=10 or 11 state, the latter if the interrupt
         * is pending. This will allow us to unmask or retrigger it
         * after routing it to the guest with a simple EOI.
         *
         * The "state" argument is a "token", all it needs is to be
         * non-NULL to switch to passed-through or NULL for the
         * other way around. We may not yet have an actual VCPU
         * target here and we don't really care.
         */
        rc = irq_set_vcpu_affinity(host_irq, state);
        if (rc) {
                pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
                return rc;
        }

        /*
         * Mask and read state of IPI. We need to know if its P bit
         * is set as that means it's potentially already using a
         * queue entry in the target
         */
        prio = xive_lock_and_mask(xive, sb, state);
        pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
                 state->old_p, state->old_q);

        /* Turn the IPI hard off */
        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

        /* Grab info about irq */
        state->pt_number = hw_irq;
        state->pt_data = irq_data_get_irq_handler_data(host_data);

        /*
         * Configure the IRQ to match the existing configuration of
         * the IPI if it was already targeted. Otherwise this will
         * mask the interrupt in a lossy way (act_priority is 0xff)
         * which is fine for a never started interrupt.
         */
        xive_native_configure_irq(hw_irq,
                                  xive_vp(xive, state->act_server),
                                  state->act_priority, state->number);

        /*
         * We do an EOI to enable the interrupt (and retrigger if needed)
         * if the guest has the interrupt unmasked and the P bit was *not*
         * set in the IPI. If it was set, we know a slot may still be in
         * use in the target queue thus we have to wait for a guest
         * originated EOI
         */
        if (prio != MASKED && !state->old_p)
                xive_vm_source_eoi(hw_irq, state->pt_data);

        /* Clear old_p/old_q as they are no longer relevant */
        state->old_p = state->old_q = false;

        /* Restore guest prio (unlocks EOI) */
        mb();
        state->guest_priority = prio;
        arch_spin_unlock(&sb->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                           struct irq_desc *host_desc)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        unsigned int host_irq = irq_desc_get_irq(host_desc);
        u16 idx;
        u8 prio;
        int rc;

        if (!xive)
                return -ENODEV;

        pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * Mask and read state of IRQ. We need to know if its P bit
         * is set as that means it's potentially already using a
         * queue entry in the target
         */
        prio = xive_lock_and_mask(xive, sb, state);
        pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
                 state->old_p, state->old_q);

        /*
         * If old_p is set, the interrupt is pending, we switch it to
         * PQ=11. This will force a resend in the host so the interrupt
         * isn't lost to whatever host driver may pick it up
         */
        if (state->old_p)
                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

        /* Release the passed-through interrupt to the host */
        rc = irq_set_vcpu_affinity(host_irq, NULL);
        if (rc) {
                pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
                return rc;
        }

        /* Forget about the IRQ */
        state->pt_number = 0;
        state->pt_data = NULL;

        /* Reconfigure the IPI */
        xive_native_configure_irq(state->ipi_number,
                                  xive_vp(xive, state->act_server),
                                  state->act_priority, state->number);

        /*
         * If old_p is set (we have a queue entry potentially
         * occupied) or the interrupt is masked, we set the IPI
         * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
         */
        if (prio == MASKED || state->old_p)
                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
        else
                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

        /* Restore guest prio (unlocks EOI) */
        mb();
        state->guest_priority = prio;
        arch_spin_unlock(&sb->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

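/*
 * Retarget away and hard-mask (PQ=01) every valid source still routed
 * to this vCPU, both the internal IPIs and any passed-through
 * interrupts, so nothing fires into the VP once it is disabled.
 */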
static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_xive *xive = kvm->arch.xive;
        int i, j;

        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
                        struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

                        if (!state->valid)
                                continue;
                        if (state->act_priority == MASKED)
                                continue;
                        if (state->act_server != xc->server_num)
                                continue;

                        /* Clean it up */
                        arch_spin_lock(&sb->lock);
                        state->act_priority = MASKED;
                        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
                        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
                        if (state->pt_number) {
                                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
                                xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
                        }
                        arch_spin_unlock(&sb->lock);
                }
        }
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        int i;

        pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

        /* Ensure no interrupt is still routed to that VP */
        xc->valid = false;
        kvmppc_xive_disable_vcpu_interrupts(vcpu);

        /* Mask the VP IPI */
        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

        /* Disable the VP */
        xive_native_disable_vp(xc->vp_id);

        /* Free the queues & associated interrupts */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                struct xive_q *q = &xc->queues[i];

                /* Free the escalation irq */
                if (xc->esc_virq[i]) {
                        free_irq(xc->esc_virq[i], vcpu);
                        irq_dispose_mapping(xc->esc_virq[i]);
                        kfree(xc->esc_virq_names[i]);
                }
                /* Free the queue */
                xive_native_disable_queue(xc->vp_id, q, i);
                if (q->qpage) {
                        free_pages((unsigned long)q->qpage,
                                   xive->q_page_order);
                        q->qpage = NULL;
                }
        }

        /* Free the IPI */
        if (xc->vp_ipi) {
                xive_cleanup_irq_data(&xc->vp_ipi_data);
                xive_native_free_irq(xc->vp_ipi);
        }
        /* Free the VP */
        kfree(xc);
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                             struct kvm_vcpu *vcpu, u32 cpu)
{
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int i, r = -EBUSY;

        pr_devel("connect_vcpu(cpu=%d)\n", cpu);

        if (dev->ops != &kvm_xive_ops) {
                pr_devel("Wrong ops !\n");
                return -EPERM;
        }
        if (xive->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type)
                return -EBUSY;
        if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
                pr_devel("Duplicate !\n");
                return -EEXIST;
        }
        if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
        if (!xc)
                return -ENOMEM;

        /* We need to synchronize with queue provisioning */
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
        xc->vp_id = xive_vp(xive, cpu);
        xc->mfrr = 0xff;
        xc->valid = true;

        r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
        if (r)
                goto bail;

        /* Configure VCPU fields for use by assembly push/pull */
        vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

        /* Allocate IPI */
        xc->vp_ipi = xive_native_alloc_irq();
        if (!xc->vp_ipi) {
                pr_err("Failed to allocate xive irq for VCPU IPI\n");
                r = -EIO;
                goto bail;
        }
        pr_devel(" IPI=0x%x\n", xc->vp_ipi);

        r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
        if (r)
                goto bail;

        /*
         * Enable the VP first as the single escalation mode will
         * affect escalation interrupt numbering
         */
        r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
        if (r) {
                pr_err("Failed to enable VP in OPAL, err %d\n", r);
                goto bail;
        }

        /*
         * Initialize queues. Initially we set them all for no queueing
         * and we enable escalation for queue 0 only which we'll use for
         * our mfrr change notifications. If the VCPU is hot-plugged, we
         * do handle provisioning however based on the existing "map"
         * of enabled queues.
         */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                struct xive_q *q = &xc->queues[i];

                /* Single escalation, no queue 7 */
                if (i == 7 && xive->single_escalation)
                        break;

                /* Is queue already enabled ? Provision it */
                if (xive->qmap & (1 << i)) {
                        r = xive_provision_queue(vcpu, i);
                        if (r == 0 && !xive->single_escalation)
                                xive_attach_escalation(vcpu, i);
                        if (r)
                                goto bail;
                } else {
                        r = xive_native_configure_queue(xc->vp_id,
                                                        q, i, NULL, 0, true);
                        if (r) {
                                pr_err("Failed to configure queue %d for VCPU %d\n",
                                       i, cpu);
                                goto bail;
                        }
                }
        }

        /* If not done above, attach priority 0 escalation */
        r = xive_attach_escalation(vcpu, 0);
        if (r)
                goto bail;

        /* Route the IPI */
        r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
        if (!r)
                xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
        mutex_unlock(&vcpu->kvm->lock);
        if (r) {
                kvmppc_xive_cleanup_vcpu(vcpu);
                return r;
        }

        vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
        return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return;

        state = &sb->irq_state[idx];

        /* Some sanity checking */
        if (!state->valid) {
                pr_err("invalid irq 0x%x in cpu queue!\n", irq);
                return;
        }

        /*
         * If the interrupt is in a queue it should have P set.
         * We warn so that it gets reported. A backtrace isn't useful
         * so no need to use a WARN_ON.
         */
        if (!state->saved_p)
                pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

        /* Set flag */
        state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
                                   struct kvmppc_xive_src_block *sb,
                                   u32 irq)
{
        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

        if (!state->valid)
                return;

        /* Mask and save state, this will also sync HW queues */
        state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

        /* Transfer P and Q */
        state->saved_p = state->old_p;
        state->saved_q = state->old_q;

        /* Unlock */
        arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
                                     struct kvmppc_xive_src_block *sb,
                                     u32 irq)
{
        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

        if (!state->valid)
                return;

        /*
         * Lock / exclude EOI (not technically necessary if the
         * guest isn't running concurrently; if this becomes a
         * performance issue we can probably remove the lock)
         */
        xive_lock_for_unmask(sb, state);

        /* Restore mask/prio if it wasn't masked */
        if (state->saved_scan_prio != MASKED)
                xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

        /* Unlock */
        arch_spin_unlock(&sb->lock);
}

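/*
 * Walk one event queue non-destructively: scan with local copies of
 * the index and toggle bit so the guest-visible queue state is left
 * untouched, flagging every guest interrupt found as "in_queue".
 */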
static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
        u32 idx = q->idx;
        u32 toggle = q->toggle;
        u32 irq;

        do {
                irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
                if (irq > XICS_IPI)
                        xive_pre_save_set_queued(xive, irq);
        } while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
        struct kvm_vcpu *vcpu = NULL;
        int i, j;

        /*
         * See comment in xive_get_source() about how this
         * works. Collect a stable state for all interrupts
         */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        xive_pre_save_mask_irq(xive, sb, j);
        }

        /* Then scan the queues and update the "in_queue" flag */
        kvm_for_each_vcpu(i, vcpu, xive->kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
                if (!xc)
                        continue;
                for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
                        if (xc->queues[j].qpage)
                                xive_pre_save_queue(xive, &xc->queues[j]);
                }
        }

        /* Finally restore interrupt states */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        xive_pre_save_unmask_irq(xive, sb, j);
        }
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
        u32 i, j;

        /* Clear all the in_queue flags */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        sb->irq_state[j].in_queue = false;
        }

        /* Next get_source() will do a new scan */
        xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u64 val, prio;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -ENOENT;

        state = &sb->irq_state[idx];

        if (!state->valid)
                return -ENOENT;

        pr_devel("get_source(%ld)...\n", irq);

        /*
         * So to properly save the state into something that looks like a
         * XICS migration stream we cannot treat interrupts individually.
         *
         * We need, instead, to mask them all (and save their previous PQ
         * state) to get a stable state in the HW, then sync them to ensure
         * that any interrupt that had already fired hits its queue, and
         * finally scan all the queues to collect which interrupts are
         * still present in the queues, so we can set the "pending" flag
         * on them and they can be resent on restore.
         *
         * So we do it all when the "first" interrupt gets saved, all the
         * state is collected at that point, the rest of xive_get_source()
         * will merely collect and convert that state to the expected
         * userspace bit mask.
         */
        if (xive->saved_src_count == 0)
                xive_pre_save_scan(xive);
        xive->saved_src_count++;

        /* Convert saved state into something compatible with xics */
        val = state->act_server;
        prio = state->saved_scan_prio;

        if (prio == MASKED) {
                val |= KVM_XICS_MASKED;
                prio = state->saved_priority;
        }
        val |= prio << KVM_XICS_PRIORITY_SHIFT;
        if (state->lsi) {
                val |= KVM_XICS_LEVEL_SENSITIVE;
                if (state->saved_p)
                        val |= KVM_XICS_PENDING;
        } else {
                if (state->saved_p)
                        val |= KVM_XICS_PRESENTED;

                if (state->saved_q)
                        val |= KVM_XICS_QUEUED;

                /*
                 * We mark it pending (which will attempt a re-delivery)
                 * if we are in a queue *or* we were masked and had
                 * Q set which is equivalent to the XICS "masked pending"
                 * state
                 */
                if (state->in_queue || (prio == MASKED && state->saved_q))
                        val |= KVM_XICS_PENDING;
        }

        /*
         * If that was the last interrupt saved, reset the
         * in_queue flags
         */
        if (xive->saved_src_count == xive->src_count)
                xive_post_save_scan(xive);

        /* Copy the result to userspace */
        if (put_user(val, ubufp))
                return -EFAULT;

        return 0;
}

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
                                                           int irq)
{
        struct kvm *kvm = xive->kvm;
        struct kvmppc_xive_src_block *sb;
        int i, bid;

        bid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* block already exists - somebody else got here first */
        if (xive->src_blocks[bid])
                goto out;

        /* Create the ICS */
        sb = kzalloc(sizeof(*sb), GFP_KERNEL);
        if (!sb)
                goto out;

        sb->id = bid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
                sb->irq_state[i].guest_priority = MASKED;
                sb->irq_state[i].saved_priority = MASKED;
                sb->irq_state[i].act_priority = MASKED;
        }
        smp_wmb();
        xive->src_blocks[bid] = sb;

        if (bid > xive->max_sbid)
                xive->max_sbid = bid;

out:
        mutex_unlock(&kvm->lock);
        return xive->src_blocks[bid];
}

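/*
 * Check whether this source was stashed as a "delayed" XISR in some
 * vCPU's ICP image by kvmppc_xive_set_icp(); if so, consume the stash
 * so that xive_set_source() can mark the interrupt pending instead.
 */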
static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu = NULL;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

                if (!xc)
                        continue;

                if (xc->delayed_irq == irq) {
                        xc->delayed_irq = 0;
                        xive->delayed_irqs--;
                        return true;
                }
        }
        return false;
}

1485static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1486{
1487        struct kvmppc_xive_src_block *sb;
1488        struct kvmppc_xive_irq_state *state;
1489        u64 __user *ubufp = (u64 __user *) addr;
1490        u16 idx;
1491        u64 val;
1492        u8 act_prio, guest_prio;
1493        u32 server;
1494        int rc = 0;
1495
1496        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1497                return -ENOENT;
1498
1499        pr_devel("set_source(irq=0x%lx)\n", irq);
1500
1501        /* Find the source */
1502        sb = kvmppc_xive_find_source(xive, irq, &idx);
1503        if (!sb) {
1504                pr_devel("No source, creating source block...\n");
1505                sb = xive_create_src_block(xive, irq);
1506                if (!sb) {
1507                        pr_devel("Failed to create block...\n");
1508                        return -ENOMEM;
1509                }
1510        }
1511        state = &sb->irq_state[idx];
1512
1513        /* Read user passed data */
1514        if (get_user(val, ubufp)) {
1515                pr_devel("fault getting user info !\n");
1516                return -EFAULT;
1517        }
1518
1519        server = val & KVM_XICS_DESTINATION_MASK;
1520        guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1521
1522        pr_devel("  val=0x016%llx (server=0x%x, guest_prio=%d)\n",
1523                 val, server, guest_prio);
1524
1525        /*
1526         * If the source doesn't already have an IPI, allocate
1527         * one and get the corresponding data
1528         */
1529        if (!state->ipi_number) {
1530                state->ipi_number = xive_native_alloc_irq();
1531                if (state->ipi_number == 0) {
1532                        pr_devel("Failed to allocate IPI!\n");
1533                        return -ENOMEM;
1534                }
1535                xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1536                pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1537        }
1538
1539        /*
1540         * We use lock_and_mask() to put the interrupt in the right
1541         * masked state. We will override that state from the saved
1542         * state further down, but this will handle the cases of
1543         * interrupts that need FW masking. We set the initial
1544         * guest_priority to 0 before calling it to ensure it
1545         * actually performs the masking.
1546         */
1546        state->guest_priority = 0;
1547        xive_lock_and_mask(xive, sb, state);
1548
1549        /*
1550         * Now, we select a target if we have one. If we don't, we
1551         * leave the interrupt untargeted. This means an interrupt
1552         * can become "untargeted" across migration if it was masked
1553         * by set_xive(), but there is little we can do about it.
1554         */
1555
1556        /* First convert the prio and mark the interrupt as untargeted */
1557        act_prio = xive_prio_from_guest(guest_prio);
1558        state->act_priority = MASKED;
1559
1560        /*
1561         * We need to drop the lock due to the mutex below. Hopefully
1562         * nothing is touching that interrupt yet since it hasn't yet
1563         * been advertised to a running guest.
1564         */
1565        arch_spin_unlock(&sb->lock);
1566
1567        /* If we have a priority, target the interrupt */
1568        if (act_prio != MASKED) {
1569                /* First, check provisioning of queues */
1570                mutex_lock(&xive->kvm->lock);
1571                rc = xive_check_provisioning(xive->kvm, act_prio);
1572                mutex_unlock(&xive->kvm->lock);
1573
1574                /* Target interrupt */
1575                if (rc == 0)
1576                        rc = xive_target_interrupt(xive->kvm, state,
1577                                                   server, act_prio);
1578                /*
1579                 * If provisioning or targeting failed, leave it
1580                 * alone and masked. It will remain disabled until
1581                 * the guest re-targets it.
1582                 */
1583        }
1584
1585        /*
1586         * Find out if this was a delayed irq stashed in an ICP,
1587         * in which case, treat it as pending
1588         */
1589        if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1590                val |= KVM_XICS_PENDING;
1591                pr_devel("  Found delayed irq, forcing PENDING!\n");
1592        }
1593
1594        /* Cleanup the SW state */
1595        state->old_p = false;
1596        state->old_q = false;
1597        state->lsi = false;
1598        state->asserted = false;
1599
1600        /* Restore LSI state */
1601        if (val & KVM_XICS_LEVEL_SENSITIVE) {
1602                state->lsi = true;
1603                if (val & KVM_XICS_PENDING)
1604                        state->asserted = true;
1605                pr_devel("  LSI! Asserted=%d\n", state->asserted);
1606        }
1607
1608        /*
1609         * Restore P and Q. If the interrupt was pending, we
1610         * force Q and !P, which will trigger a resend.
1611         *
1612         * That means that a guest that had both an interrupt
1613         * pending (queued) and Q set will restore with only
1614         * one instance of that interrupt instead of 2, but that
1615         * is perfectly fine as coalescing interrupts that haven't
1616         * been presented yet is always allowed.
1617         */
1618        if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1619                state->old_p = true;
1620        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1621                state->old_q = true;
1622
1623        pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);
1624
1625        /*
1626         * Restore the mask state: if masked, just save the priority;
1627         * if unmasked, update the guest priority, perform the
1628         * appropriate state transition and re-trigger if necessary.
1629         */
1630        if (val & KVM_XICS_MASKED) {
1631                pr_devel("  masked, saving prio\n");
1632                state->guest_priority = MASKED;
1633                state->saved_priority = guest_prio;
1634        } else {
1635                pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
1636                xive_finish_unmask(xive, sb, state, guest_prio);
1637                state->saved_priority = guest_prio;
1638        }
1639
1640        /* Increment the number of valid sources and mark this one valid */
1641        if (!state->valid)
1642                xive->src_count++;
1643        state->valid = true;
1644
1645        return 0;
1646}
1647
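/*
 * Assert or deassert a source on behalf of the irqfd/irq routing
 * code. LSIs track the asserted level and a deassertion is not
 * forwarded to the HW; any assertion fires the backing IPI through
 * its trigger page. Passed-through sources are rejected here since
 * the device itself drives them.
 */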
1648int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1649                        bool line_status)
1650{
1651        struct kvmppc_xive *xive = kvm->arch.xive;
1652        struct kvmppc_xive_src_block *sb;
1653        struct kvmppc_xive_irq_state *state;
1654        u16 idx;
1655
1656        if (!xive)
1657                return -ENODEV;
1658
1659        sb = kvmppc_xive_find_source(xive, irq, &idx);
1660        if (!sb)
1661                return -EINVAL;
1662
1663        /* Perform the access locklessly (proper RCU protection is still needed) */
1664        state = &sb->irq_state[idx];
1665        if (!state->valid)
1666                return -EINVAL;
1667
1668        /* We don't allow a trigger on a passed-through interrupt */
1669        if (state->pt_number)
1670                return -EINVAL;
1671
1672        if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1673                state->asserted = true;
1674        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1675                state->asserted = false;
1676                return 0;
1677        }
1678
1679        /* Trigger the IPI */
1680        xive_irq_trigger(&state->ipi_data);
1681
1682        return 0;
1683}
1684
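/*
 * The device control API below reuses the existing XICS attribute
 * layout so an unmodified XICS-aware userspace can drive it. As a
 * minimal sketch (hypothetical fd variable, error handling elided),
 * restoring one source from userspace would look like:
 *
 *	__u64 state = server | (prio << KVM_XICS_PRIORITY_SHIFT);
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_XICS_GRP_SOURCES,
 *		.attr	= irq,
 *		.addr	= (__u64)&state,
 *	};
 *	ioctl(xive_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 */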
1685static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1686{
1687        struct kvmppc_xive *xive = dev->private;
1688
1689        /* We honor the existing XICS ioctl */
1690        switch (attr->group) {
1691        case KVM_DEV_XICS_GRP_SOURCES:
1692                return xive_set_source(xive, attr->attr, attr->addr);
1693        }
1694        return -ENXIO;
1695}
1696
1697static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1698{
1699        struct kvmppc_xive *xive = dev->private;
1700
1701        /* We honor the existing XICS ioctl */
1702        switch (attr->group) {
1703        case KVM_DEV_XICS_GRP_SOURCES:
1704                return xive_get_source(xive, attr->attr, attr->addr);
1705        }
1706        return -ENXIO;
1707}
1708
1709static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1710{
1711        /* We honor the same limits as XICS, at least for now */
1712        switch (attr->group) {
1713        case KVM_DEV_XICS_GRP_SOURCES:
1714                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1715                    attr->attr < KVMPPC_XICS_NR_IRQS)
1716                        return 0;
1717                break;
1718        }
1719        return -ENXIO;
1720}
1721
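/*
 * Quiesce a HW interrupt before handing it back: mask it at the ESB
 * level (PQ = 01), detach it from any VP/priority target, then let
 * the native layer clean up its data.
 */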
1722static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1723{
1724        xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1725        xive_native_configure_irq(hw_num, 0, MASKED, 0);
1726        xive_cleanup_irq_data(xd);
1727}
1728
1729static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1730{
1731        int i;
1732
1733        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1734                struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1735
1736                if (!state->valid)
1737                        continue;
1738
1739                kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1740                xive_native_free_irq(state->ipi_number);
1741
1742                /* Pass-through, cleanup too */
1743                if (state->pt_number)
1744                        kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1745
1746                state->valid = false;
1747        }
1748}
1749
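/*
 * Device teardown: remove the debugfs file, detach from the VM, free
 * every source (including the backing IPIs), then release the VP
 * block allocated at create time.
 */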
1750static void kvmppc_xive_free(struct kvm_device *dev)
1751{
1752        struct kvmppc_xive *xive = dev->private;
1753        struct kvm *kvm = xive->kvm;
1754        int i;
1755
1756        debugfs_remove(xive->dentry);
1757
1758        if (kvm)
1759                kvm->arch.xive = NULL;
1760
1761        /* Mask and free interrupts */
1762        for (i = 0; i <= xive->max_sbid; i++) {
1763                if (xive->src_blocks[i])
1764                        kvmppc_xive_free_sources(xive->src_blocks[i]);
1765                kfree(xive->src_blocks[i]);
1766                xive->src_blocks[i] = NULL;
1767        }
1768
1769        if (xive->vp_base != XIVE_INVALID_VP)
1770                xive_native_free_vp_block(xive->vp_base);
1771
1773        kfree(xive);
1774        kfree(dev);
1775}
1776
1777static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
1778{
1779        struct kvmppc_xive *xive;
1780        struct kvm *kvm = dev->kvm;
1781        int ret = 0;
1782
1783        pr_devel("Creating xive for partition\n");
1784
1785        xive = kzalloc(sizeof(*xive), GFP_KERNEL);
1786        if (!xive)
1787                return -ENOMEM;
1788
1789        dev->private = xive;
1790        xive->dev = dev;
1791        xive->kvm = kvm;
1792
1793        /* Already there? */
1794        if (kvm->arch.xive)
1795                ret = -EEXIST;
1796        else
1797                kvm->arch.xive = xive;
1798
1799        /* We use the default queue size set by the host */
1800        xive->q_order = xive_native_default_eq_shift();
1801        if (xive->q_order < PAGE_SHIFT)
1802                xive->q_page_order = 0;
1803        else
1804                xive->q_page_order = xive->q_order - PAGE_SHIFT;
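        /*
         * For example (the default EQ size is host/FW dependent): a
         * 64kB EQ gives q_order = 16, so with 4kB pages
         * (PAGE_SHIFT = 12) each queue is an order-4 allocation,
         * i.e. sixteen 4kB pages.
         */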
1805
1806        /* Allocate a block of VPs, one per possible vCPU */
1807        xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
1808        pr_devel("VP_Base=%x\n", xive->vp_base);
1809
1810        if (xive->vp_base == XIVE_INVALID_VP)
1811                ret = -ENOMEM;
1812
1813        xive->single_escalation = xive_native_has_single_escalation();
1814
1815        if (ret) {
1816                kfree(xive);
1817                return ret;
1818        }
1819
1820        return 0;
1821}
1822
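/*
 * Debugfs dump: one line of ICP state per vCPU, followed by its
 * queues and escalation interrupts, then aggregate hcall counts.
 * The R/V columns separate real mode from virtual mode handling.
 */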
1824static int xive_debug_show(struct seq_file *m, void *private)
1825{
1826        struct kvmppc_xive *xive = m->private;
1827        struct kvm *kvm = xive->kvm;
1828        struct kvm_vcpu *vcpu;
1829        u64 t_rm_h_xirr = 0;
1830        u64 t_rm_h_ipoll = 0;
1831        u64 t_rm_h_cppr = 0;
1832        u64 t_rm_h_eoi = 0;
1833        u64 t_rm_h_ipi = 0;
1834        u64 t_vm_h_xirr = 0;
1835        u64 t_vm_h_ipoll = 0;
1836        u64 t_vm_h_cppr = 0;
1837        u64 t_vm_h_eoi = 0;
1838        u64 t_vm_h_ipi = 0;
1839        unsigned int i;
1840
1841        if (!kvm)
1842                return 0;
1843
1844        seq_printf(m, "==========\nVCPU state\n==========\n");
1845
1846        kvm_for_each_vcpu(i, vcpu, kvm) {
1847                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1848                unsigned int j;
1849
1850                if (!xc)
1851                        continue;
1852
1853                seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
1854                           " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
1855                           xc->server_num, xc->cppr, xc->hw_cppr,
1856                           xc->mfrr, xc->pending,
1857                           xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
1858                for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1859                        struct xive_q *q = &xc->queues[j];
1860                        u32 i0, i1, idx;
1861
1862                        if (!q->qpage && !xc->esc_virq[j])
1863                                continue;
1864
1865                        seq_printf(m, " [q%d]: ", j);
1866
1867                        if (q->qpage) {
1868                                idx = q->idx;
1869                                i0 = be32_to_cpup(q->qpage + idx);
1870                                idx = (idx + 1) & q->msk;
1871                                i1 = be32_to_cpup(q->qpage + idx);
1872                                seq_printf(m, "T=%d %08x %08x...\n", q->toggle, i0, i1);
1873                        }
1874                        if (xc->esc_virq[j]) {
1875                                struct irq_data *d = irq_get_irq_data(xc->esc_virq[j]);
1876                                struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1877                                u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
1878                                seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
1879                                           (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
1880                                           (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
1881                                           xc->esc_virq[j], pq, xd->eoi_page);
1882                                seq_printf(m, "\n");
1883                        }
1884                }
1885
1886                t_rm_h_xirr += xc->stat_rm_h_xirr;
1887                t_rm_h_ipoll += xc->stat_rm_h_ipoll;
1888                t_rm_h_cppr += xc->stat_rm_h_cppr;
1889                t_rm_h_eoi += xc->stat_rm_h_eoi;
1890                t_rm_h_ipi += xc->stat_rm_h_ipi;
1891                t_vm_h_xirr += xc->stat_vm_h_xirr;
1892                t_vm_h_ipoll += xc->stat_vm_h_ipoll;
1893                t_vm_h_cppr += xc->stat_vm_h_cppr;
1894                t_vm_h_eoi += xc->stat_vm_h_eoi;
1895                t_vm_h_ipi += xc->stat_vm_h_ipi;
1896        }
1897
1898        seq_printf(m, "Hcall totals\n");
1899        seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
1900        seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
1901        seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
1902        seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
1903        seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
1904
1905        return 0;
1906}
1907
1908static int xive_debug_open(struct inode *inode, struct file *file)
1909{
1910        return single_open(file, xive_debug_show, inode->i_private);
1911}
1912
1913static const struct file_operations xive_debug_fops = {
1914        .open = xive_debug_open,
1915        .read = seq_read,
1916        .llseek = seq_lseek,
1917        .release = single_release,
1918};
1919
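/*
 * The file name embeds the kvmppc_xive pointer so that the XIVE
 * devices of several VMs can coexist under powerpc_debugfs_root
 * without clashing.
 */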
1920static void xive_debugfs_init(struct kvmppc_xive *xive)
1921{
1922        char *name;
1923
1924        name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
1925        if (!name) {
1926                pr_err("%s: no memory for name\n", __func__);
1927                return;
1928        }
1929
1930        xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
1931                                           xive, &xive_debug_fops);
1932
1933        pr_debug("%s: created %s\n", __func__, name);
1934        kfree(name);
1935}
1936
1937static void kvmppc_xive_init(struct kvm_device *dev)
1938{
1939        struct kvmppc_xive *xive = dev->private;
1940
1941        /* Register some debug interfaces */
1942        xive_debugfs_init(xive);
1943}
1944
1945struct kvm_device_ops kvm_xive_ops = {
1946        .name = "kvm-xive",
1947        .create = kvmppc_xive_create,
1948        .init = kvmppc_xive_init,
1949        .destroy = kvmppc_xive_free,
1950        .set_attr = xive_set_attr,
1951        .get_attr = xive_get_attr,
1952        .has_attr = xive_has_attr,
1953};
1954
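/*
 * Publish the virtual mode hcall handlers instantiated from the
 * template so that the rest of KVM can call them; they are cleared
 * again on module exit so a stale pointer is never invoked.
 */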
1955void kvmppc_xive_init_module(void)
1956{
1957        __xive_vm_h_xirr = xive_vm_h_xirr;
1958        __xive_vm_h_ipoll = xive_vm_h_ipoll;
1959        __xive_vm_h_ipi = xive_vm_h_ipi;
1960        __xive_vm_h_cppr = xive_vm_h_cppr;
1961        __xive_vm_h_eoi = xive_vm_h_eoi;
1962}
1963
1964void kvmppc_xive_exit_module(void)
1965{
1966        __xive_vm_h_xirr = NULL;
1967        __xive_vm_h_ipoll = NULL;
1968        __xive_vm_h_ipi = NULL;
1969        __xive_vm_h_cppr = NULL;
1970        __xive_vm_h_eoi = NULL;
1971}
1972