linux/arch/powerpc/kvm/book3s_xive.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
   4 */
   5
   6#define pr_fmt(fmt) "xive-kvm: " fmt
   7
   8#include <linux/kernel.h>
   9#include <linux/kvm_host.h>
  10#include <linux/err.h>
  11#include <linux/gfp.h>
  12#include <linux/spinlock.h>
  13#include <linux/delay.h>
  14#include <linux/percpu.h>
  15#include <linux/cpumask.h>
  16#include <linux/uaccess.h>
  17#include <asm/kvm_book3s.h>
  18#include <asm/kvm_ppc.h>
  19#include <asm/hvcall.h>
  20#include <asm/xics.h>
  21#include <asm/xive.h>
  22#include <asm/xive-regs.h>
  23#include <asm/debug.h>
  24#include <asm/debugfs.h>
  25#include <asm/time.h>
  26#include <asm/opal.h>
  27
  28#include <linux/debugfs.h>
  29#include <linux/seq_file.h>
  30
  31#include "book3s_xive.h"
  32
  33
  34/*
  35 * Virtual mode variants of the hcalls for use on radix/radix
  36 * with AIL. They require the VCPU's VP to be "pushed"
  37 *
  38 * We still instantiate them here because we use some of the
  39 * generated utility functions as well in this file.
  40 */
  41#define XIVE_RUNTIME_CHECKS
  42#define X_PFX xive_vm_
  43#define X_STATIC static
  44#define X_STAT_PFX stat_vm_
  45#define __x_tima                xive_tima
  46#define __x_eoi_page(xd)        ((void __iomem *)((xd)->eoi_mmio))
  47#define __x_trig_page(xd)       ((void __iomem *)((xd)->trig_mmio))
  48#define __x_writeb      __raw_writeb
  49#define __x_readw       __raw_readw
  50#define __x_readq       __raw_readq
  51#define __x_writeq      __raw_writeq
  52
  53#include "book3s_xive_template.c"
  54
  55/*
  56 * We leave a gap of a couple of interrupts in the queue to
   57 * account for the IPI and an additional safety guard.
  58 */
  59#define XIVE_Q_GAP      2
  60
  61/*
  62 * Push a vcpu's context to the XIVE on guest entry.
  63 * This assumes we are in virtual mode (MMU on)
  64 */
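     /*
      * Roughly, the two MMIO stores below copy the saved OS context
      * word (w01) into the OS ring of the thread management area and
      * then write the CAM line (which has TM_QW1W2_VO set, see
      * kvmppc_xive_connect_vcpu()), which is what makes the HW treat
      * this VP as dispatched on the current thread.
      */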
  65void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
  66{
  67        void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
  68        u64 pq;
  69
  70        /*
  71         * Nothing to do if the platform doesn't have a XIVE
  72         * or this vCPU doesn't have its own XIVE context
  73         * (e.g. because it's not using an in-kernel interrupt controller).
  74         */
  75        if (!tima || !vcpu->arch.xive_cam_word)
  76                return;
  77
  78        eieio();
  79        __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
  80        __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
  81        vcpu->arch.xive_pushed = 1;
  82        eieio();
  83
  84        /*
  85         * We clear the irq_pending flag. There is a small chance of a
  86         * race vs. the escalation interrupt happening on another
  87         * processor setting it again, but the only consequence is to
  88         * cause a spurious wakeup on the next H_CEDE, which is not an
  89         * issue.
  90         */
  91        vcpu->arch.irq_pending = 0;
  92
  93        /*
  94         * In single escalation mode, if the escalation interrupt is
  95         * on, we mask it.
  96         */
  97        if (vcpu->arch.xive_esc_on) {
  98                pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
  99                                                  XIVE_ESB_SET_PQ_01));
 100                mb();
 101
 102                /*
 103                 * We have a possible subtle race here: The escalation
 104                 * interrupt might have fired and be on its way to the
 105                 * host queue while we mask it, and if we unmask it
 106                 * early enough (re-cede right away), there is a
  107                 * theoretical possibility that it fires again, thus
  108                 * landing in the target queue more than once, which is
  109                 * a big no-no.
 110                 *
 111                 * Fortunately, solving this is rather easy. If the
 112                 * above load setting PQ to 01 returns a previous
 113                 * value where P is set, then we know the escalation
 114                 * interrupt is somewhere on its way to the host. In
 115                 * that case we simply don't clear the xive_esc_on
 116                 * flag below. It will be eventually cleared by the
 117                 * handler for the escalation interrupt.
 118                 *
 119                 * Then, when doing a cede, we check that flag again
 120                 * before re-enabling the escalation interrupt, and if
 121                 * set, we abort the cede.
 122                 */
 123                if (!(pq & XIVE_ESB_VAL_P))
 124                        /* Now P is 0, we can clear the flag */
 125                        vcpu->arch.xive_esc_on = 0;
 126        }
 127}
 128EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
 129
 130/*
 131 * This is a simple trigger for a generic XIVE IRQ. This must
 132 * only be called for interrupts that support a trigger page
 133 */
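     /*
      * A store of any value to the trigger page queues a new event for
      * the source; this is used for instance by kvmppc_xive_set_icp()
      * below to re-kick the VP IPI when restoring MFRR state.
      */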
 134static bool xive_irq_trigger(struct xive_irq_data *xd)
 135{
 136        /* This should be only for MSIs */
 137        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
 138                return false;
 139
 140        /* Those interrupts should always have a trigger page */
 141        if (WARN_ON(!xd->trig_mmio))
 142                return false;
 143
 144        out_be64(xd->trig_mmio, 0);
 145
 146        return true;
 147}
 148
 149static irqreturn_t xive_esc_irq(int irq, void *data)
 150{
 151        struct kvm_vcpu *vcpu = data;
 152
 153        vcpu->arch.irq_pending = 1;
 154        smp_mb();
 155        if (vcpu->arch.ceded)
 156                kvmppc_fast_vcpu_kick(vcpu);
 157
 158        /* Since we have the no-EOI flag, the interrupt is effectively
 159         * disabled now. Clearing xive_esc_on means we won't bother
 160         * doing so on the next entry.
 161         *
 162         * This also allows the entry code to know that if a PQ combination
 163         * of 10 is observed while xive_esc_on is true, it means the queue
 164         * contains an unprocessed escalation interrupt. We don't make use of
  165         * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
 166         */
 167        vcpu->arch.xive_esc_on = false;
 168
 169        /* This orders xive_esc_on = false vs. subsequent stale_p = true */
 170        smp_wmb();      /* goes with smp_mb() in cleanup_single_escalation */
 171
 172        return IRQ_HANDLED;
 173}
 174
 175int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
 176                                  bool single_escalation)
 177{
 178        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 179        struct xive_q *q = &xc->queues[prio];
 180        char *name = NULL;
 181        int rc;
 182
 183        /* Already there ? */
 184        if (xc->esc_virq[prio])
 185                return 0;
 186
 187        /* Hook up the escalation interrupt */
 188        xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
 189        if (!xc->esc_virq[prio]) {
 190                pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
 191                       prio, xc->server_num);
 192                return -EIO;
 193        }
 194
 195        if (single_escalation)
 196                name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
 197                                 vcpu->kvm->arch.lpid, xc->server_num);
 198        else
 199                name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
 200                                 vcpu->kvm->arch.lpid, xc->server_num, prio);
 201        if (!name) {
 202                pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
 203                       prio, xc->server_num);
 204                rc = -ENOMEM;
 205                goto error;
 206        }
 207
 208        pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
 209
 210        rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
 211                         IRQF_NO_THREAD, name, vcpu);
 212        if (rc) {
 213                pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
 214                       prio, xc->server_num);
 215                goto error;
 216        }
 217        xc->esc_virq_names[prio] = name;
 218
 219        /* In single escalation mode, we grab the ESB MMIO of the
 220         * interrupt and mask it. Also populate the VCPU v/raddr
 221         * of the ESB page for use by asm entry/exit code. Finally
 222         * set the XIVE_IRQ_NO_EOI flag which will prevent the
 223         * core code from performing an EOI on the escalation
 224         * interrupt, thus leaving it effectively masked after
 225         * it fires once.
 226         */
 227        if (single_escalation) {
 228                struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
 229                struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
 230
 231                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
 232                vcpu->arch.xive_esc_raddr = xd->eoi_page;
 233                vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
 234                xd->flags |= XIVE_IRQ_NO_EOI;
 235        }
 236
 237        return 0;
 238error:
 239        irq_dispose_mapping(xc->esc_virq[prio]);
 240        xc->esc_virq[prio] = 0;
 241        kfree(name);
 242        return rc;
 243}
 244
 245static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
 246{
 247        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 248        struct kvmppc_xive *xive = xc->xive;
 249        struct xive_q *q =  &xc->queues[prio];
 250        void *qpage;
 251        int rc;
 252
 253        if (WARN_ON(q->qpage))
 254                return 0;
 255
 256        /* Allocate the queue and retrieve infos on current node for now */
 257        qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
 258        if (!qpage) {
 259                pr_err("Failed to allocate queue %d for VCPU %d\n",
 260                       prio, xc->server_num);
 261                return -ENOMEM;
 262        }
 263        memset(qpage, 0, 1 << xive->q_order);
 264
 265        /*
 266         * Reconfigure the queue. This will set q->qpage only once the
 267         * queue is fully configured. This is a requirement for prio 0
 268         * as we will stop doing EOIs for every IPI as soon as we observe
 269         * qpage being non-NULL, and instead will only EOI when we receive
 270         * corresponding queue 0 entries
 271         */
 272        rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
 273                                         xive->q_order, true);
 274        if (rc)
 275                pr_err("Failed to configure queue %d for VCPU %d\n",
 276                       prio, xc->server_num);
 277        return rc;
 278}
 279
 280/* Called with xive->lock held */
 281static int xive_check_provisioning(struct kvm *kvm, u8 prio)
 282{
 283        struct kvmppc_xive *xive = kvm->arch.xive;
 284        struct kvm_vcpu *vcpu;
 285        int i, rc;
 286
 287        lockdep_assert_held(&xive->lock);
 288
 289        /* Already provisioned ? */
 290        if (xive->qmap & (1 << prio))
 291                return 0;
 292
 293        pr_devel("Provisioning prio... %d\n", prio);
 294
 295        /* Provision each VCPU and enable escalations if needed */
 296        kvm_for_each_vcpu(i, vcpu, kvm) {
 297                if (!vcpu->arch.xive_vcpu)
 298                        continue;
 299                rc = xive_provision_queue(vcpu, prio);
 300                if (rc == 0 && !xive->single_escalation)
 301                        kvmppc_xive_attach_escalation(vcpu, prio,
 302                                                      xive->single_escalation);
 303                if (rc)
 304                        return rc;
 305        }
 306
 307        /* Order previous stores and mark it as provisioned */
 308        mb();
 309        xive->qmap |= (1 << prio);
 310        return 0;
 311}
 312
 313static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
 314{
 315        struct kvm_vcpu *vcpu;
 316        struct kvmppc_xive_vcpu *xc;
 317        struct xive_q *q;
 318
 319        /* Locate target server */
 320        vcpu = kvmppc_xive_find_server(kvm, server);
 321        if (!vcpu) {
 322                pr_warn("%s: Can't find server %d\n", __func__, server);
 323                return;
 324        }
 325        xc = vcpu->arch.xive_vcpu;
 326        if (WARN_ON(!xc))
 327                return;
 328
 329        q = &xc->queues[prio];
 330        atomic_inc(&q->pending_count);
 331}
 332
 333static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
 334{
 335        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 336        struct xive_q *q;
 337        u32 max;
 338
 339        if (WARN_ON(!xc))
 340                return -ENXIO;
 341        if (!xc->valid)
 342                return -ENXIO;
 343
 344        q = &xc->queues[prio];
 345        if (WARN_ON(!q->qpage))
 346                return -ENXIO;
 347
 348        /* Calculate max number of interrupts in that queue. */
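             /*
              * For example (illustrative numbers only): a 4KB queue page of
              * 4-byte entries gives q->msk = 1023, so up to 1022 interrupts
              * may be targetted at this queue once the IPI/guard gap is
              * reserved.
              */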
 349        max = (q->msk + 1) - XIVE_Q_GAP;
 350        return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
 351}
 352
 353int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
 354{
 355        struct kvm_vcpu *vcpu;
 356        int i, rc;
 357
 358        /* Locate target server */
 359        vcpu = kvmppc_xive_find_server(kvm, *server);
 360        if (!vcpu) {
 361                pr_devel("Can't find server %d\n", *server);
 362                return -EINVAL;
 363        }
 364
 365        pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
 366
 367        /* Try pick it */
 368        rc = xive_try_pick_queue(vcpu, prio);
 369        if (rc == 0)
 370                return rc;
 371
 372        pr_devel(" .. failed, looking up candidate...\n");
 373
 374        /* Failed, pick another VCPU */
 375        kvm_for_each_vcpu(i, vcpu, kvm) {
 376                if (!vcpu->arch.xive_vcpu)
 377                        continue;
 378                rc = xive_try_pick_queue(vcpu, prio);
 379                if (rc == 0) {
 380                        *server = vcpu->arch.xive_vcpu->server_num;
 381                        pr_devel("  found on 0x%x/%d\n", *server, prio);
 382                        return rc;
 383                }
 384        }
 385        pr_devel("  no available target !\n");
 386
 387        /* No available target ! */
 388        return -EBUSY;
 389}
 390
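     /*
      * A short reminder of the ESB PQ convention as used in this file:
      * a "set PQ" special load returns the previous state with P in
      * bit 1 and Q in bit 0 (XIVE_ESB_VAL_P/XIVE_ESB_VAL_Q). PQ=00
      * leaves the source enabled, PQ=10 and PQ=11 are the soft-masked
      * states (Q latching any event received while masked), and PQ=01
      * is used to turn a source hard off.
      */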
 391static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
 392                             struct kvmppc_xive_src_block *sb,
 393                             struct kvmppc_xive_irq_state *state)
 394{
 395        struct xive_irq_data *xd;
 396        u32 hw_num;
 397        u8 old_prio;
 398        u64 val;
 399
 400        /*
 401         * Take the lock, set masked, try again if racing
 402         * with H_EOI
 403         */
 404        for (;;) {
 405                arch_spin_lock(&sb->lock);
 406                old_prio = state->guest_priority;
 407                state->guest_priority = MASKED;
 408                mb();
 409                if (!state->in_eoi)
 410                        break;
 411                state->guest_priority = old_prio;
 412                arch_spin_unlock(&sb->lock);
 413        }
 414
 415        /* No change ? Bail */
 416        if (old_prio == MASKED)
 417                return old_prio;
 418
 419        /* Get the right irq */
 420        kvmppc_xive_select_irq(state, &hw_num, &xd);
 421
 422        /*
 423         * If the interrupt is marked as needing masking via
  424         * firmware, we do it here. Firmware masking, however,
  425         * is "lossy": it won't return the old p and q bits
 426         * and won't set the interrupt to a state where it will
 427         * record queued ones. If this is an issue we should do
 428         * lazy masking instead.
 429         *
 430         * For now, we work around this in unmask by forcing
 431         * an interrupt whenever we unmask a non-LSI via FW
 432         * (if ever).
 433         */
 434        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 435                xive_native_configure_irq(hw_num,
 436                                kvmppc_xive_vp(xive, state->act_server),
 437                                MASKED, state->number);
 438                /* set old_p so we can track if an H_EOI was done */
 439                state->old_p = true;
 440                state->old_q = false;
 441        } else {
 442                /* Set PQ to 10, return old P and old Q and remember them */
 443                val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
 444                state->old_p = !!(val & 2);
 445                state->old_q = !!(val & 1);
 446
 447                /*
  448                 * Synchronize hardware to ensure the queues are updated
 449                 * when masking
 450                 */
 451                xive_native_sync_source(hw_num);
 452        }
 453
 454        return old_prio;
 455}
 456
 457static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
 458                                 struct kvmppc_xive_irq_state *state)
 459{
 460        /*
  461         * Take the lock, try again if racing with H_EOI
 462         */
 463        for (;;) {
 464                arch_spin_lock(&sb->lock);
 465                if (!state->in_eoi)
 466                        break;
 467                arch_spin_unlock(&sb->lock);
 468        }
 469}
 470
 471static void xive_finish_unmask(struct kvmppc_xive *xive,
 472                               struct kvmppc_xive_src_block *sb,
 473                               struct kvmppc_xive_irq_state *state,
 474                               u8 prio)
 475{
 476        struct xive_irq_data *xd;
 477        u32 hw_num;
 478
 479        /* If we aren't changing a thing, move on */
 480        if (state->guest_priority != MASKED)
 481                goto bail;
 482
 483        /* Get the right irq */
 484        kvmppc_xive_select_irq(state, &hw_num, &xd);
 485
 486        /*
  487         * See comment in xive_lock_and_mask() concerning masking
 488         * via firmware.
 489         */
 490        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
 491                xive_native_configure_irq(hw_num,
 492                                kvmppc_xive_vp(xive, state->act_server),
 493                                state->act_priority, state->number);
 494                /* If an EOI is needed, do it here */
 495                if (!state->old_p)
 496                        xive_vm_source_eoi(hw_num, xd);
 497                /* If this is not an LSI, force a trigger */
 498                if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
 499                        xive_irq_trigger(xd);
 500                goto bail;
 501        }
 502
 503        /* Old Q set, set PQ to 11 */
 504        if (state->old_q)
 505                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
 506
 507        /*
  508         * If not old P, then perform an "effective" EOI
 509         * on the source. This will handle the cases where
 510         * FW EOI is needed.
 511         */
 512        if (!state->old_p)
 513                xive_vm_source_eoi(hw_num, xd);
 514
 515        /* Synchronize ordering and mark unmasked */
 516        mb();
 517bail:
 518        state->guest_priority = prio;
 519}
 520
 521/*
 522 * Target an interrupt to a given server/prio, this will fallback
 523 * to another server if necessary and perform the HW targetting
 524 * updates as needed
 525 *
 526 * NOTE: Must be called with the state lock held
 527 */
 528static int xive_target_interrupt(struct kvm *kvm,
 529                                 struct kvmppc_xive_irq_state *state,
 530                                 u32 server, u8 prio)
 531{
 532        struct kvmppc_xive *xive = kvm->arch.xive;
 533        u32 hw_num;
 534        int rc;
 535
 536        /*
 537         * This will return a tentative server and actual
 538         * priority. The count for that new target will have
 539         * already been incremented.
 540         */
 541        rc = kvmppc_xive_select_target(kvm, &server, prio);
 542
 543        /*
 544         * We failed to find a target ? Not much we can do
 545         * at least until we support the GIQ.
 546         */
 547        if (rc)
 548                return rc;
 549
 550        /*
 551         * Increment the old queue pending count if there
 552         * was one so that the old queue count gets adjusted later
 553         * when observed to be empty.
 554         */
 555        if (state->act_priority != MASKED)
 556                xive_inc_q_pending(kvm,
 557                                   state->act_server,
 558                                   state->act_priority);
 559        /*
 560         * Update state and HW
 561         */
 562        state->act_priority = prio;
 563        state->act_server = server;
 564
 565        /* Get the right irq */
 566        kvmppc_xive_select_irq(state, &hw_num, NULL);
 567
 568        return xive_native_configure_irq(hw_num,
 569                                         kvmppc_xive_vp(xive, server),
 570                                         prio, state->number);
 571}
 572
 573/*
 574 * Targetting rules: In order to avoid losing track of
  575 * pending interrupts across mask and unmask, which would
 576 * allow queue overflows, we implement the following rules:
 577 *
 578 *  - Unless it was never enabled (or we run out of capacity)
 579 *    an interrupt is always targetted at a valid server/queue
 580 *    pair even when "masked" by the guest. This pair tends to
 581 *    be the last one used but it can be changed under some
 582 *    circumstances. That allows us to separate targetting
  583 *    from masking: we only handle accounting during (re)targetting.
  584 *    This also allows us to let an interrupt drain into its target
 585 *    queue after masking, avoiding complex schemes to remove
 586 *    interrupts out of remote processor queues.
 587 *
 588 *  - When masking, we set PQ to 10 and save the previous value
 589 *    of P and Q.
 590 *
 591 *  - When unmasking, if saved Q was set, we set PQ to 11
 592 *    otherwise we leave PQ to the HW state which will be either
 593 *    10 if nothing happened or 11 if the interrupt fired while
 594 *    masked. Effectively we are OR'ing the previous Q into the
 595 *    HW Q.
 596 *
 597 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 598 *    which will unmask the interrupt and shoot a new one if Q was
 599 *    set.
 600 *
 601 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 602 *    effectively meaning an H_EOI from the guest is still expected
 603 *    for that interrupt).
 604 *
 605 *  - If H_EOI occurs while masked, we clear the saved P.
 606 *
 607 *  - When changing target, we account on the new target and
 608 *    increment a separate "pending" counter on the old one.
 609 *    This pending counter will be used to decrement the old
 610 *    target's count when its queue has been observed empty.
 611 */
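     /*
      * Worked example (illustrative): an MSI is masked by the guest, so
      * xive_lock_and_mask() sets PQ=10 and records old_p/old_q. The
      * device then fires once, flipping PQ to 11. On unmask, saved Q
      * was clear so PQ is left alone, and since saved P was also clear
      * we do the effective EOI described above, which re-arms the
      * source and re-triggers the latched event: the guest still sees
      * exactly one interrupt.
      */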
 612
 613int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
 614                         u32 priority)
 615{
 616        struct kvmppc_xive *xive = kvm->arch.xive;
 617        struct kvmppc_xive_src_block *sb;
 618        struct kvmppc_xive_irq_state *state;
 619        u8 new_act_prio;
 620        int rc = 0;
 621        u16 idx;
 622
 623        if (!xive)
 624                return -ENODEV;
 625
 626        pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
 627                 irq, server, priority);
 628
 629        /* First, check provisioning of queues */
 630        if (priority != MASKED) {
 631                mutex_lock(&xive->lock);
 632                rc = xive_check_provisioning(xive->kvm,
 633                              xive_prio_from_guest(priority));
 634                mutex_unlock(&xive->lock);
 635        }
 636        if (rc) {
 637                pr_devel("  provisioning failure %d !\n", rc);
 638                return rc;
 639        }
 640
 641        sb = kvmppc_xive_find_source(xive, irq, &idx);
 642        if (!sb)
 643                return -EINVAL;
 644        state = &sb->irq_state[idx];
 645
 646        /*
 647         * We first handle masking/unmasking since the locking
  648         * might need to be retried due to EOIs; we'll handle
 649         * targetting changes later. These functions will return
 650         * with the SB lock held.
 651         *
 652         * xive_lock_and_mask() will also set state->guest_priority
 653         * but won't otherwise change other fields of the state.
 654         *
  655         * xive_lock_for_unmask() will not actually unmask; this will
 656         * be done later by xive_finish_unmask() once the targetting
 657         * has been done, so we don't try to unmask an interrupt
 658         * that hasn't yet been targetted.
 659         */
 660        if (priority == MASKED)
 661                xive_lock_and_mask(xive, sb, state);
 662        else
 663                xive_lock_for_unmask(sb, state);
 664
 665
 666        /*
 667         * Then we handle targetting.
 668         *
 669         * First calculate a new "actual priority"
 670         */
 671        new_act_prio = state->act_priority;
 672        if (priority != MASKED)
 673                new_act_prio = xive_prio_from_guest(priority);
 674
 675        pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
 676                 new_act_prio, state->act_server, state->act_priority);
 677
 678        /*
  679         * Then check if we actually need to change anything.
 680         *
 681         * The condition for re-targetting the interrupt is that
 682         * we have a valid new priority (new_act_prio is not 0xff)
 683         * and either the server or the priority changed.
 684         *
 685         * Note: If act_priority was ff and the new priority is
 686         *       also ff, we don't do anything and leave the interrupt
  687         *       untargetted. An attempt to do an int_on on an
 688         *       untargetted interrupt will fail. If that is a problem
  689         *       we could initialize interrupts with a valid default priority.
 690         */
 691
 692        if (new_act_prio != MASKED &&
 693            (state->act_server != server ||
 694             state->act_priority != new_act_prio))
 695                rc = xive_target_interrupt(kvm, state, server, new_act_prio);
 696
 697        /*
 698         * Perform the final unmasking of the interrupt source
 699         * if necessary
 700         */
 701        if (priority != MASKED)
 702                xive_finish_unmask(xive, sb, state, priority);
 703
 704        /*
 705         * Finally Update saved_priority to match. Only int_on/off
 706         * set this field to a different value.
 707         */
 708        state->saved_priority = priority;
 709
 710        arch_spin_unlock(&sb->lock);
 711        return rc;
 712}
 713
 714int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 715                         u32 *priority)
 716{
 717        struct kvmppc_xive *xive = kvm->arch.xive;
 718        struct kvmppc_xive_src_block *sb;
 719        struct kvmppc_xive_irq_state *state;
 720        u16 idx;
 721
 722        if (!xive)
 723                return -ENODEV;
 724
 725        sb = kvmppc_xive_find_source(xive, irq, &idx);
 726        if (!sb)
 727                return -EINVAL;
 728        state = &sb->irq_state[idx];
 729        arch_spin_lock(&sb->lock);
 730        *server = state->act_server;
 731        *priority = state->guest_priority;
 732        arch_spin_unlock(&sb->lock);
 733
 734        return 0;
 735}
 736
 737int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
 738{
 739        struct kvmppc_xive *xive = kvm->arch.xive;
 740        struct kvmppc_xive_src_block *sb;
 741        struct kvmppc_xive_irq_state *state;
 742        u16 idx;
 743
 744        if (!xive)
 745                return -ENODEV;
 746
 747        sb = kvmppc_xive_find_source(xive, irq, &idx);
 748        if (!sb)
 749                return -EINVAL;
 750        state = &sb->irq_state[idx];
 751
 752        pr_devel("int_on(irq=0x%x)\n", irq);
 753
 754        /*
 755         * Check if interrupt was not targetted
 756         */
 757        if (state->act_priority == MASKED) {
 758                pr_devel("int_on on untargetted interrupt\n");
 759                return -EINVAL;
 760        }
 761
 762        /* If saved_priority is 0xff, do nothing */
 763        if (state->saved_priority == MASKED)
 764                return 0;
 765
 766        /*
 767         * Lock and unmask it.
 768         */
 769        xive_lock_for_unmask(sb, state);
 770        xive_finish_unmask(xive, sb, state, state->saved_priority);
 771        arch_spin_unlock(&sb->lock);
 772
 773        return 0;
 774}
 775
 776int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
 777{
 778        struct kvmppc_xive *xive = kvm->arch.xive;
 779        struct kvmppc_xive_src_block *sb;
 780        struct kvmppc_xive_irq_state *state;
 781        u16 idx;
 782
 783        if (!xive)
 784                return -ENODEV;
 785
 786        sb = kvmppc_xive_find_source(xive, irq, &idx);
 787        if (!sb)
 788                return -EINVAL;
 789        state = &sb->irq_state[idx];
 790
 791        pr_devel("int_off(irq=0x%x)\n", irq);
 792
 793        /*
 794         * Lock and mask
 795         */
 796        state->saved_priority = xive_lock_and_mask(xive, sb, state);
 797        arch_spin_unlock(&sb->lock);
 798
 799        return 0;
 800}
 801
 802static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
 803{
 804        struct kvmppc_xive_src_block *sb;
 805        struct kvmppc_xive_irq_state *state;
 806        u16 idx;
 807
 808        sb = kvmppc_xive_find_source(xive, irq, &idx);
 809        if (!sb)
 810                return false;
 811        state = &sb->irq_state[idx];
 812        if (!state->valid)
 813                return false;
 814
 815        /*
 816         * Trigger the IPI. This assumes we never restore a pass-through
 817         * interrupt which should be safe enough
 818         */
 819        xive_irq_trigger(&state->ipi_data);
 820
 821        return true;
 822}
 823
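     /*
      * The two functions below exchange the XICS ICP state with
      * userspace as a single 64-bit ONE_REG value packed with the
      * KVM_REG_PPC_ICP_* shifts. The get side only reports CPPR and
      * MFRR (with a pending priority of 0xff); the set side restores a
      * saved XISR, if any, by re-triggering the corresponding source.
      */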
 824u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 825{
 826        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 827
 828        if (!xc)
 829                return 0;
 830
 831        /* Return the per-cpu state for state saving/migration */
 832        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
 833               (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
 834               (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 835}
 836
 837int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
 838{
 839        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 840        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
 841        u8 cppr, mfrr;
 842        u32 xisr;
 843
 844        if (!xc || !xive)
 845                return -ENOENT;
 846
 847        /* Grab individual state fields. We don't use pending_pri */
 848        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
 849        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
 850                KVM_REG_PPC_ICP_XISR_MASK;
 851        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
 852
 853        pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
 854                 xc->server_num, cppr, mfrr, xisr);
 855
 856        /*
 857         * We can't update the state of a "pushed" VCPU, but that
 858         * shouldn't happen because the vcpu->mutex makes running a
 859         * vcpu mutually exclusive with doing one_reg get/set on it.
 860         */
 861        if (WARN_ON(vcpu->arch.xive_pushed))
 862                return -EIO;
 863
 864        /* Update VCPU HW saved state */
 865        vcpu->arch.xive_saved_state.cppr = cppr;
 866        xc->hw_cppr = xc->cppr = cppr;
 867
 868        /*
 869         * Update MFRR state. If it's not 0xff, we mark the VCPU as
 870         * having a pending MFRR change, which will re-evaluate the
 871         * target. The VCPU will thus potentially get a spurious
 872         * interrupt but that's not a big deal.
 873         */
 874        xc->mfrr = mfrr;
 875        if (mfrr < cppr)
 876                xive_irq_trigger(&xc->vp_ipi_data);
 877
 878        /*
 879         * Now saved XIRR is "interesting". It means there's something in
 880         * the legacy "1 element" queue... for an IPI we simply ignore it,
 881         * as the MFRR restore will handle that. For anything else we need
 882         * to force a resend of the source.
 883         * However the source may not have been setup yet. If that's the
 884         * case, we keep that info and increment a counter in the xive to
 885         * tell subsequent xive_set_source() to go look.
 886         */
 887        if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
 888                xc->delayed_irq = xisr;
 889                xive->delayed_irqs++;
 890                pr_devel("  xisr restore delayed\n");
 891        }
 892
 893        return 0;
 894}
 895
 896int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
 897                           struct irq_desc *host_desc)
 898{
 899        struct kvmppc_xive *xive = kvm->arch.xive;
 900        struct kvmppc_xive_src_block *sb;
 901        struct kvmppc_xive_irq_state *state;
 902        struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
 903        unsigned int host_irq = irq_desc_get_irq(host_desc);
 904        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
 905        u16 idx;
 906        u8 prio;
 907        int rc;
 908
 909        if (!xive)
 910                return -ENODEV;
 911
  912        pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
 913
 914        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
 915        if (!sb)
 916                return -EINVAL;
 917        state = &sb->irq_state[idx];
 918
 919        /*
 920         * Mark the passed-through interrupt as going to a VCPU,
 921         * this will prevent further EOIs and similar operations
 922         * from the XIVE code. It will also mask the interrupt
 923         * to either PQ=10 or 11 state, the latter if the interrupt
 924         * is pending. This will allow us to unmask or retrigger it
 925         * after routing it to the guest with a simple EOI.
 926         *
 927         * The "state" argument is a "token", all it needs is to be
 928         * non-NULL to switch to passed-through or NULL for the
 929         * other way around. We may not yet have an actual VCPU
 930         * target here and we don't really care.
 931         */
 932        rc = irq_set_vcpu_affinity(host_irq, state);
 933        if (rc) {
 934                pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
 935                return rc;
 936        }
 937
 938        /*
 939         * Mask and read state of IPI. We need to know if its P bit
 940         * is set as that means it's potentially already using a
 941         * queue entry in the target
 942         */
 943        prio = xive_lock_and_mask(xive, sb, state);
 944        pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
 945                 state->old_p, state->old_q);
 946
 947        /* Turn the IPI hard off */
 948        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
 949
 950        /*
 951         * Reset ESB guest mapping. Needed when ESB pages are exposed
 952         * to the guest in XIVE native mode
 953         */
 954        if (xive->ops && xive->ops->reset_mapped)
 955                xive->ops->reset_mapped(kvm, guest_irq);
 956
 957        /* Grab info about irq */
 958        state->pt_number = hw_irq;
 959        state->pt_data = irq_data_get_irq_handler_data(host_data);
 960
 961        /*
 962         * Configure the IRQ to match the existing configuration of
 963         * the IPI if it was already targetted. Otherwise this will
 964         * mask the interrupt in a lossy way (act_priority is 0xff)
 965         * which is fine for a never started interrupt.
 966         */
 967        xive_native_configure_irq(hw_irq,
 968                                  kvmppc_xive_vp(xive, state->act_server),
 969                                  state->act_priority, state->number);
 970
 971        /*
 972         * We do an EOI to enable the interrupt (and retrigger if needed)
 973         * if the guest has the interrupt unmasked and the P bit was *not*
 974         * set in the IPI. If it was set, we know a slot may still be in
 975         * use in the target queue thus we have to wait for a guest
 976         * originated EOI
 977         */
 978        if (prio != MASKED && !state->old_p)
 979                xive_vm_source_eoi(hw_irq, state->pt_data);
 980
 981        /* Clear old_p/old_q as they are no longer relevant */
 982        state->old_p = state->old_q = false;
 983
 984        /* Restore guest prio (unlocks EOI) */
 985        mb();
 986        state->guest_priority = prio;
 987        arch_spin_unlock(&sb->lock);
 988
 989        return 0;
 990}
 991EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
 992
 993int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
 994                           struct irq_desc *host_desc)
 995{
 996        struct kvmppc_xive *xive = kvm->arch.xive;
 997        struct kvmppc_xive_src_block *sb;
 998        struct kvmppc_xive_irq_state *state;
 999        unsigned int host_irq = irq_desc_get_irq(host_desc);
1000        u16 idx;
1001        u8 prio;
1002        int rc;
1003
1004        if (!xive)
1005                return -ENODEV;
1006
1007        pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
1008
1009        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1010        if (!sb)
1011                return -EINVAL;
1012        state = &sb->irq_state[idx];
1013
1014        /*
1015         * Mask and read state of IRQ. We need to know if its P bit
1016         * is set as that means it's potentially already using a
1017         * queue entry in the target
1018         */
1019        prio = xive_lock_and_mask(xive, sb, state);
1020        pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1021                 state->old_p, state->old_q);
1022
1023        /*
 1024         * If old_p is set, the interrupt is pending; we switch it to
 1025         * PQ=11. This will force a resend in the host so the interrupt
 1026         * isn't lost to whatever host driver may pick it up
1027         */
1028        if (state->old_p)
1029                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1030
1031        /* Release the passed-through interrupt to the host */
1032        rc = irq_set_vcpu_affinity(host_irq, NULL);
1033        if (rc) {
1034                pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1035                return rc;
1036        }
1037
1038        /* Forget about the IRQ */
1039        state->pt_number = 0;
1040        state->pt_data = NULL;
1041
1042        /*
1043         * Reset ESB guest mapping. Needed when ESB pages are exposed
1044         * to the guest in XIVE native mode
1045         */
1046        if (xive->ops && xive->ops->reset_mapped) {
1047                xive->ops->reset_mapped(kvm, guest_irq);
1048        }
1049
1050        /* Reconfigure the IPI */
1051        xive_native_configure_irq(state->ipi_number,
1052                                  kvmppc_xive_vp(xive, state->act_server),
1053                                  state->act_priority, state->number);
1054
1055        /*
1056         * If old_p is set (we have a queue entry potentially
1057         * occupied) or the interrupt is masked, we set the IPI
1058         * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
1059         */
1060        if (prio == MASKED || state->old_p)
1061                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1062        else
1063                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1064
1065        /* Restore guest prio (unlocks EOI) */
1066        mb();
1067        state->guest_priority = prio;
1068        arch_spin_unlock(&sb->lock);
1069
1070        return 0;
1071}
1072EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1073
1074void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1075{
1076        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1077        struct kvm *kvm = vcpu->kvm;
1078        struct kvmppc_xive *xive = kvm->arch.xive;
1079        int i, j;
1080
1081        for (i = 0; i <= xive->max_sbid; i++) {
1082                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1083
1084                if (!sb)
1085                        continue;
1086                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1087                        struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1088
1089                        if (!state->valid)
1090                                continue;
1091                        if (state->act_priority == MASKED)
1092                                continue;
1093                        if (state->act_server != xc->server_num)
1094                                continue;
1095
1096                        /* Clean it up */
1097                        arch_spin_lock(&sb->lock);
1098                        state->act_priority = MASKED;
1099                        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1100                        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1101                        if (state->pt_number) {
1102                                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1103                                xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1104                        }
1105                        arch_spin_unlock(&sb->lock);
1106                }
1107        }
1108
1109        /* Disable vcpu's escalation interrupt */
1110        if (vcpu->arch.xive_esc_on) {
1111                __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1112                                             XIVE_ESB_SET_PQ_01));
1113                vcpu->arch.xive_esc_on = false;
1114        }
1115
1116        /*
1117         * Clear pointers to escalation interrupt ESB.
1118         * This is safe because the vcpu->mutex is held, preventing
1119         * any other CPU from concurrently executing a KVM_RUN ioctl.
1120         */
1121        vcpu->arch.xive_esc_vaddr = 0;
1122        vcpu->arch.xive_esc_raddr = 0;
1123}
1124
1125/*
1126 * In single escalation mode, the escalation interrupt is marked so
1127 * that EOI doesn't re-enable it, but just sets the stale_p flag to
1128 * indicate that the P bit has already been dealt with.  However, the
1129 * assembly code that enters the guest sets PQ to 00 without clearing
1130 * stale_p (because it has no easy way to address it).  Hence we have
1131 * to adjust stale_p before shutting down the interrupt.
1132 */
1133void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
1134                                    struct kvmppc_xive_vcpu *xc, int irq)
1135{
1136        struct irq_data *d = irq_get_irq_data(irq);
1137        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1138
1139        /*
1140         * This slightly odd sequence gives the right result
1141         * (i.e. stale_p set if xive_esc_on is false) even if
1142         * we race with xive_esc_irq() and xive_irq_eoi().
1143         */
1144        xd->stale_p = false;
 1145        smp_mb();               /* paired with smp_wmb in xive_esc_irq */
1146        if (!vcpu->arch.xive_esc_on)
1147                xd->stale_p = true;
1148}
1149
1150void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1151{
1152        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1153        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1154        int i;
1155
1156        if (!kvmppc_xics_enabled(vcpu))
1157                return;
1158
1159        if (!xc)
1160                return;
1161
1162        pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1163
1164        /* Ensure no interrupt is still routed to that VP */
1165        xc->valid = false;
1166        kvmppc_xive_disable_vcpu_interrupts(vcpu);
1167
1168        /* Mask the VP IPI */
1169        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1170
1171        /* Free escalations */
1172        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1173                if (xc->esc_virq[i]) {
1174                        if (xc->xive->single_escalation)
1175                                xive_cleanup_single_escalation(vcpu, xc,
1176                                                        xc->esc_virq[i]);
1177                        free_irq(xc->esc_virq[i], vcpu);
1178                        irq_dispose_mapping(xc->esc_virq[i]);
1179                        kfree(xc->esc_virq_names[i]);
1180                }
1181        }
1182
1183        /* Disable the VP */
1184        xive_native_disable_vp(xc->vp_id);
1185
1186        /* Clear the cam word so guest entry won't try to push context */
1187        vcpu->arch.xive_cam_word = 0;
1188
1189        /* Free the queues */
1190        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1191                struct xive_q *q = &xc->queues[i];
1192
1193                xive_native_disable_queue(xc->vp_id, q, i);
1194                if (q->qpage) {
1195                        free_pages((unsigned long)q->qpage,
1196                                   xive->q_page_order);
1197                        q->qpage = NULL;
1198                }
1199        }
1200
1201        /* Free the IPI */
1202        if (xc->vp_ipi) {
1203                xive_cleanup_irq_data(&xc->vp_ipi_data);
1204                xive_native_free_irq(xc->vp_ipi);
1205        }
1206        /* Free the VP */
1207        kfree(xc);
1208
1209        /* Cleanup the vcpu */
1210        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1211        vcpu->arch.xive_vcpu = NULL;
1212}
1213
1214static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1215{
1216        /* We have a block of xive->nr_servers VPs. We just need to check
1217         * raw vCPU ids are below the expected limit for this guest's
 1218         * core stride; kvmppc_pack_vcpu_id() will pack them down to an
1219         * index that can be safely used to compute a VP id that belongs
1220         * to the VP block.
1221         */
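             /*
              * For example (illustrative): with xive->nr_servers = 8 and an
              * emulated SMT-8 guest, raw vCPU ids up to 63 are accepted here
              * and later folded by kvmppc_pack_vcpu_id() into the 8-entry VP
              * block.
              */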
1222        return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
1223}
1224
1225int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1226{
1227        u32 vp_id;
1228
1229        if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1230                pr_devel("Out of bounds !\n");
1231                return -EINVAL;
1232        }
1233
1234        if (xive->vp_base == XIVE_INVALID_VP) {
1235                xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1236                pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1237
1238                if (xive->vp_base == XIVE_INVALID_VP)
1239                        return -ENOSPC;
1240        }
1241
1242        vp_id = kvmppc_xive_vp(xive, cpu);
1243        if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1244                pr_devel("Duplicate !\n");
1245                return -EEXIST;
1246        }
1247
1248        *vp = vp_id;
1249
1250        return 0;
1251}
1252
1253int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1254                             struct kvm_vcpu *vcpu, u32 cpu)
1255{
1256        struct kvmppc_xive *xive = dev->private;
1257        struct kvmppc_xive_vcpu *xc;
1258        int i, r = -EBUSY;
1259        u32 vp_id;
1260
1261        pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1262
1263        if (dev->ops != &kvm_xive_ops) {
1264                pr_devel("Wrong ops !\n");
1265                return -EPERM;
1266        }
1267        if (xive->kvm != vcpu->kvm)
1268                return -EPERM;
1269        if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1270                return -EBUSY;
1271
1272        /* We need to synchronize with queue provisioning */
1273        mutex_lock(&xive->lock);
1274
1275        r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1276        if (r)
1277                goto bail;
1278
1279        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1280        if (!xc) {
1281                r = -ENOMEM;
1282                goto bail;
1283        }
1284
1285        vcpu->arch.xive_vcpu = xc;
1286        xc->xive = xive;
1287        xc->vcpu = vcpu;
1288        xc->server_num = cpu;
1289        xc->vp_id = vp_id;
1290        xc->mfrr = 0xff;
1291        xc->valid = true;
1292
1293        r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1294        if (r)
1295                goto bail;
1296
1297        /* Configure VCPU fields for use by assembly push/pull */
1298        vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1299        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1300
1301        /* Allocate IPI */
1302        xc->vp_ipi = xive_native_alloc_irq();
1303        if (!xc->vp_ipi) {
1304                pr_err("Failed to allocate xive irq for VCPU IPI\n");
1305                r = -EIO;
1306                goto bail;
1307        }
1308        pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1309
1310        r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1311        if (r)
1312                goto bail;
1313
1314        /*
1315         * Enable the VP first as the single escalation mode will
 1316         * affect escalation interrupt numbering
1317         */
1318        r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1319        if (r) {
1320                pr_err("Failed to enable VP in OPAL, err %d\n", r);
1321                goto bail;
1322        }
1323
1324        /*
1325         * Initialize queues. Initially we set them all for no queueing
1326         * and we enable escalation for queue 0 only which we'll use for
1327         * our mfrr change notifications. If the VCPU is hot-plugged, we
 1328         * do however handle provisioning based on the existing "map"
1329         * of enabled queues.
1330         */
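             /*
              * Reminder: bit i of xive->qmap is set once queues for prio i
              * have been provisioned on all vcpus (see
              * xive_check_provisioning()).
              */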
1331        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1332                struct xive_q *q = &xc->queues[i];
1333
1334                /* Single escalation, no queue 7 */
1335                if (i == 7 && xive->single_escalation)
1336                        break;
1337
1338                /* Is queue already enabled ? Provision it */
1339                if (xive->qmap & (1 << i)) {
1340                        r = xive_provision_queue(vcpu, i);
1341                        if (r == 0 && !xive->single_escalation)
1342                                kvmppc_xive_attach_escalation(
1343                                        vcpu, i, xive->single_escalation);
1344                        if (r)
1345                                goto bail;
1346                } else {
1347                        r = xive_native_configure_queue(xc->vp_id,
1348                                                        q, i, NULL, 0, true);
1349                        if (r) {
1350                                pr_err("Failed to configure queue %d for VCPU %d\n",
1351                                       i, cpu);
1352                                goto bail;
1353                        }
1354                }
1355        }
1356
1357        /* If not done above, attach priority 0 escalation */
1358        r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1359        if (r)
1360                goto bail;
1361
1362        /* Route the IPI */
1363        r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1364        if (!r)
1365                xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1366
1367bail:
1368        mutex_unlock(&xive->lock);
1369        if (r) {
1370                kvmppc_xive_cleanup_vcpu(vcpu);
1371                return r;
1372        }
1373
1374        vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1375        return 0;
1376}
1377
1378/*
1379 * Scanning of queues before/after migration save
1380 */
1381static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1382{
1383        struct kvmppc_xive_src_block *sb;
1384        struct kvmppc_xive_irq_state *state;
1385        u16 idx;
1386
1387        sb = kvmppc_xive_find_source(xive, irq, &idx);
1388        if (!sb)
1389                return;
1390
1391        state = &sb->irq_state[idx];
1392
1393        /* Some sanity checking */
1394        if (!state->valid) {
1395                pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1396                return;
1397        }
1398
1399        /*
1400         * If the interrupt is in a queue it should have P set.
 1401         * We warn so that it gets reported. A backtrace isn't useful
1402         * so no need to use a WARN_ON.
1403         */
1404        if (!state->saved_p)
1405                pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1406
1407        /* Set flag */
1408        state->in_queue = true;
1409}
1410
1411static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1412                                   struct kvmppc_xive_src_block *sb,
1413                                   u32 irq)
1414{
1415        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1416
1417        if (!state->valid)
1418                return;
1419
1420        /* Mask and save state, this will also sync HW queues */
1421        state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1422
1423        /* Transfer P and Q */
1424        state->saved_p = state->old_p;
1425        state->saved_q = state->old_q;
1426
1427        /* Unlock */
1428        arch_spin_unlock(&sb->lock);
1429}
1430
1431static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1432                                     struct kvmppc_xive_src_block *sb,
1433                                     u32 irq)
1434{
1435        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1436
1437        if (!state->valid)
1438                return;
1439
1440        /*
1441         * Lock / exclude EOI (not technically necessary if the
 1442         * guest isn't running concurrently). If this becomes a
1443         * performance issue we can probably remove the lock.
1444         */
1445        xive_lock_for_unmask(sb, state);
1446
1447        /* Restore mask/prio if it wasn't masked */
1448        if (state->saved_scan_prio != MASKED)
1449                xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1450
1451        /* Unlock */
1452        arch_spin_unlock(&sb->lock);
1453}
1454
1455static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1456{
1457        u32 idx = q->idx;
1458        u32 toggle = q->toggle;
1459        u32 irq;
1460
1461        do {
1462                irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1463                if (irq > XICS_IPI)
1464                        xive_pre_save_set_queued(xive, irq);
 1465        } while (irq);
1466}
1467
1468static void xive_pre_save_scan(struct kvmppc_xive *xive)
1469{
1470        struct kvm_vcpu *vcpu = NULL;
1471        int i, j;
1472
1473        /*
1474         * See comment in xive_get_source() about how this
 1475         * works. Collect a stable state for all interrupts
1476         */
1477        for (i = 0; i <= xive->max_sbid; i++) {
1478                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1479                if (!sb)
1480                        continue;
1481                for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1482                        xive_pre_save_mask_irq(xive, sb, j);
1483        }
1484
1485        /* Then scan the queues and update the "in_queue" flag */
1486        kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1487                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1488                if (!xc)
1489                        continue;
1490                for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1491                        if (xc->queues[j].qpage)
1492                                xive_pre_save_queue(xive, &xc->queues[j]);
1493                }
1494        }
1495
1496        /* Finally restore interrupt states */
1497        for (i = 0; i <= xive->max_sbid; i++) {
1498                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1499                if (!sb)
1500                        continue;
1501                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1502                        xive_pre_save_unmask_irq(xive, sb, j);
1503        }
1504}
1505
1506static void xive_post_save_scan(struct kvmppc_xive *xive)
1507{
1508        u32 i, j;
1509
1510        /* Clear all the in_queue flags */
1511        for (i = 0; i <= xive->max_sbid; i++) {
1512                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1513                if (!sb)
1514                        continue;
1515                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1516                        sb->irq_state[j].in_queue = false;
1517        }
1518
1519        /* Next get_source() will do a new scan */
1520        xive->saved_src_count = 0;
1521}
1522
1523/*
1524 * This returns the source configuration and state to user space.
1525 */
1526static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1527{
1528        struct kvmppc_xive_src_block *sb;
1529        struct kvmppc_xive_irq_state *state;
1530        u64 __user *ubufp = (u64 __user *) addr;
1531        u64 val, prio;
1532        u16 idx;
1533
1534        sb = kvmppc_xive_find_source(xive, irq, &idx);
1535        if (!sb)
1536                return -ENOENT;
1537
1538        state = &sb->irq_state[idx];
1539
1540        if (!state->valid)
1541                return -ENOENT;
1542
1543        pr_devel("get_source(%ld)...\n", irq);
1544
1545        /*
1546         * To properly save the state into something that looks like a
1547         * XICS migration stream, we cannot treat interrupts individually.
1548         *
1549         * Instead, we need to mask them all (and save their previous PQ
1550         * state) to get a stable state in the HW, then sync them to ensure
1551         * that any interrupt that had already fired hits its queue, and
1552         * finally scan all the queues to collect which interrupts are
1553         * still present in the queues, so we can set the "pending" flag
1554         * on them and have them resent on restore.
1555         *
1556         * So we do it all when the "first" interrupt gets saved: all the
1557         * state is collected at that point, and the rest of
1558         * xive_get_source() merely converts that state into the expected
1559         * userspace bit mask.
1560         */
1561        if (xive->saved_src_count == 0)
1562                xive_pre_save_scan(xive);
1563        xive->saved_src_count++;
1564
1565        /* Convert saved state into something compatible with xics */
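            /*
             * The layout of the value handed to userspace mirrors the code
             * below: the target server number sits in the low bits (the
             * KVM_XICS_DESTINATION_MASK field), the priority is shifted up
             * by KVM_XICS_PRIORITY_SHIFT, and the KVM_XICS_MASKED,
             * KVM_XICS_LEVEL_SENSITIVE, KVM_XICS_PRESENTED, KVM_XICS_QUEUED
             * and KVM_XICS_PENDING flags are ORed in on top.
             */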
1566        val = state->act_server;
1567        prio = state->saved_scan_prio;
1568
1569        if (prio == MASKED) {
1570                val |= KVM_XICS_MASKED;
1571                prio = state->saved_priority;
1572        }
1573        val |= prio << KVM_XICS_PRIORITY_SHIFT;
1574        if (state->lsi) {
1575                val |= KVM_XICS_LEVEL_SENSITIVE;
1576                if (state->saved_p)
1577                        val |= KVM_XICS_PENDING;
1578        } else {
1579                if (state->saved_p)
1580                        val |= KVM_XICS_PRESENTED;
1581
1582                if (state->saved_q)
1583                        val |= KVM_XICS_QUEUED;
1584
1585                /*
1586                 * We mark it pending (which will attempt a re-delivery)
1587                 * if we are in a queue *or* we were masked and had
1588                 * Q set which is equivalent to the XICS "masked pending"
1589                 * state
1590                 */
1591                if (state->in_queue || (prio == MASKED && state->saved_q))
1592                        val |= KVM_XICS_PENDING;
1593        }
1594
1595        /*
1596         * If that was the last interrupt saved, reset the
1597         * in_queue flags
1598         */
1599        if (xive->saved_src_count == xive->src_count)
1600                xive_post_save_scan(xive);
1601
1602        /* Copy the result to userspace */
1603        if (put_user(val, ubufp))
1604                return -EFAULT;
1605
1606        return 0;
1607}
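
    /*
     * Illustrative userspace counterpart of xive_get_source() (a sketch,
     * not taken from this file): the per-source word built above is read
     * with KVM_GET_DEVICE_ATTR on the KVM_DEV_XICS_GRP_SOURCES group,
     * using the interrupt number as the attribute and the address of a
     * u64 as the buffer. "xics_fd" and "irq" below are hypothetical names.
     *
     *    __u64 state;
     *    struct kvm_device_attr attr = {
     *            .group = KVM_DEV_XICS_GRP_SOURCES,
     *            .attr  = irq,
     *            .addr  = (__u64)(uintptr_t)&state,
     *    };
     *    if (ioctl(xics_fd, KVM_GET_DEVICE_ATTR, &attr))
     *            err(1, "KVM_GET_DEVICE_ATTR");
     */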
1608
1609struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1610        struct kvmppc_xive *xive, int irq)
1611{
1612        struct kvmppc_xive_src_block *sb;
1613        int i, bid;
1614
1615        bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1616
1617        mutex_lock(&xive->lock);
1618
1619        /* block already exists - somebody else got here first */
1620        if (xive->src_blocks[bid])
1621                goto out;
1622
1623        /* Create the ICS */
1624        sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1625        if (!sb)
1626                goto out;
1627
1628        sb->id = bid;
1629
1630        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1631                sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1632                sb->irq_state[i].eisn = 0;
1633                sb->irq_state[i].guest_priority = MASKED;
1634                sb->irq_state[i].saved_priority = MASKED;
1635                sb->irq_state[i].act_priority = MASKED;
1636        }
1637        smp_wmb();
1638        xive->src_blocks[bid] = sb;
1639
1640        if (bid > xive->max_sbid)
1641                xive->max_sbid = bid;
1642
1643out:
1644        mutex_unlock(&xive->lock);
1645        return xive->src_blocks[bid];
1646}
1647
1648static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1649{
1650        struct kvm *kvm = xive->kvm;
1651        struct kvm_vcpu *vcpu = NULL;
1652        int i;
1653
1654        kvm_for_each_vcpu(i, vcpu, kvm) {
1655                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1656
1657                if (!xc)
1658                        continue;
1659
1660                if (xc->delayed_irq == irq) {
1661                        xc->delayed_irq = 0;
1662                        xive->delayed_irqs--;
1663                        return true;
1664                }
1665        }
1666        return false;
1667}
1668
1669static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1670{
1671        struct kvmppc_xive_src_block *sb;
1672        struct kvmppc_xive_irq_state *state;
1673        u64 __user *ubufp = (u64 __user *) addr;
1674        u16 idx;
1675        u64 val;
1676        u8 act_prio, guest_prio;
1677        u32 server;
1678        int rc = 0;
1679
1680        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1681                return -ENOENT;
1682
1683        pr_devel("set_source(irq=0x%lx)\n", irq);
1684
1685        /* Find the source */
1686        sb = kvmppc_xive_find_source(xive, irq, &idx);
1687        if (!sb) {
1688                pr_devel("No source, creating source block...\n");
1689                sb = kvmppc_xive_create_src_block(xive, irq);
1690                if (!sb) {
1691                        pr_devel("Failed to create block...\n");
1692                        return -ENOMEM;
1693                }
1694        }
1695        state = &sb->irq_state[idx];
1696
1697        /* Read user passed data */
1698        if (get_user(val, ubufp)) {
1699                pr_devel("fault getting user info!\n");
1700                return -EFAULT;
1701        }
1702
1703        server = val & KVM_XICS_DESTINATION_MASK;
1704        guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1705
1706        pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
1707                 val, server, guest_prio);
1708
1709        /*
1710         * If the source doesn't already have an IPI, allocate
1711         * one and get the corresponding data
1712         */
1713        if (!state->ipi_number) {
1714                state->ipi_number = xive_native_alloc_irq();
1715                if (state->ipi_number == 0) {
1716                        pr_devel("Failed to allocate IPI!\n");
1717                        return -ENOMEM;
1718                }
1719                xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1720                pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1721        }
1722
1723        /*
1724         * We use lock_and_mask() to put the interrupt in the right masked
1725         * state. We will override that state from the saved state
1726         * further down, but this will handle the cases of interrupts
1727         * that need FW masking. We set the initial guest_priority to
1728         * 0 before calling it to ensure it actually performs the masking.
1729         */
1730        state->guest_priority = 0;
1731        xive_lock_and_mask(xive, sb, state);
1732
1733        /*
1734         * Now, we select a target if we have one. If we don't, we
1735         * leave the interrupt untargeted. This means that an interrupt
1736         * can become "untargeted" across migration if it was masked
1737         * by set_xive(), but there is little we can do about it.
1738         */
1739
1740        /* First convert the priority and mark the interrupt as untargeted */
1741        act_prio = xive_prio_from_guest(guest_prio);
1742        state->act_priority = MASKED;
1743
1744        /*
1745         * We need to drop the lock due to the mutex below. Hopefully
1746         * nothing is touching that interrupt since it hasn't been
1747         * advertised to a running guest yet.
1748         */
1749        arch_spin_unlock(&sb->lock);
1750
1751        /* If we have a priority, target the interrupt */
1752        if (act_prio != MASKED) {
1753                /* First, check provisioning of queues */
1754                mutex_lock(&xive->lock);
1755                rc = xive_check_provisioning(xive->kvm, act_prio);
1756                mutex_unlock(&xive->lock);
1757
1758                /* Target interrupt */
1759                if (rc == 0)
1760                        rc = xive_target_interrupt(xive->kvm, state,
1761                                                   server, act_prio);
1762                /*
1763                 * If provisioning or targeting failed, leave it
1764                 * alone and masked. It will remain disabled until
1765                 * the guest re-targets it.
1766                 */
1767        }
1768
1769        /*
1770         * Find out if this was a delayed irq stashed in an ICP,
1771         * in which case we treat it as pending.
1772         */
1773        if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1774                val |= KVM_XICS_PENDING;
1775                pr_devel("  Found delayed! Forcing PENDING!\n");
1776        }
1777
1778        /* Cleanup the SW state */
1779        state->old_p = false;
1780        state->old_q = false;
1781        state->lsi = false;
1782        state->asserted = false;
1783
1784        /* Restore LSI state */
1785        if (val & KVM_XICS_LEVEL_SENSITIVE) {
1786                state->lsi = true;
1787                if (val & KVM_XICS_PENDING)
1788                        state->asserted = true;
1789                pr_devel("  LSI! Asserted=%d\n", state->asserted);
1790        }
1791
1792        /*
1793         * Restore P and Q. If the interrupt was pending, we
1794         * force Q and !P, which will trigger a resend.
1795         *
1796         * That means that a guest that had both an interrupt
1797         * pending (queued) and Q set will restore with only
1798         * one instance of that interrupt instead of 2, but that
1799         * is perfectly fine as coalescing interrupts that haven't
1800         * been presented yet is always allowed.
1801         */
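            /*
             * In table form (derived from the two checks just below):
             *
             *   saved flags                       old_p  old_q
             *   PRESENTED only                      1      0
             *   PRESENTED | QUEUED                  1      1
             *   QUEUED only                         0      1
             *   PENDING (any combination)           0      1
             */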
1802        if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1803                state->old_p = true;
1804        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1805                state->old_q = true;
1806
1807        pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);
1808
1809        /*
1810         * If the interrupt was masked, just record the saved priority;
1811         * otherwise update the guest priority, perform the appropriate
1812         * state transition and re-trigger if necessary.
1813         */
1814        if (val & KVM_XICS_MASKED) {
1815                pr_devel("  masked, saving prio\n");
1816                state->guest_priority = MASKED;
1817                state->saved_priority = guest_prio;
1818        } else {
1819                pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
1820                xive_finish_unmask(xive, sb, state, guest_prio);
1821                state->saved_priority = guest_prio;
1822        }
1823
1824        /* Increment the number of valid sources and mark this one valid */
1825        if (!state->valid)
1826                xive->src_count++;
1827        state->valid = true;
1828
1829        return 0;
1830}
1831
1832int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1833                        bool line_status)
1834{
1835        struct kvmppc_xive *xive = kvm->arch.xive;
1836        struct kvmppc_xive_src_block *sb;
1837        struct kvmppc_xive_irq_state *state;
1838        u16 idx;
1839
1840        if (!xive)
1841                return -ENODEV;
1842
1843        sb = kvmppc_xive_find_source(xive, irq, &idx);
1844        if (!sb)
1845                return -EINVAL;
1846
1847        /* Performed locklessly for now (some RCU protection is still needed here) */
1848        state = &sb->irq_state[idx];
1849        if (!state->valid)
1850                return -EINVAL;
1851
1852        /* We don't allow a trigger on a passed-through interrupt */
1853        if (state->pt_number)
1854                return -EINVAL;
1855
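            /*
             * Level semantics: level 1 on an LSI, or KVM_INTERRUPT_SET_LEVEL,
             * asserts the line; level 0 or KVM_INTERRUPT_UNSET deasserts it,
             * in which case there is nothing to trigger. An edge (level 1 on
             * a non-LSI source) simply falls through to the trigger below.
             */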
1856        if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1857                state->asserted = 1;
1858        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1859                state->asserted = 0;
1860                return 0;
1861        }
1862
1863        /* Trigger the IPI */
1864        xive_irq_trigger(&state->ipi_data);
1865
1866        return 0;
1867}
1868
1869int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
1870{
1871        u32 __user *ubufp = (u32 __user *) addr;
1872        u32 nr_servers;
1873        int rc = 0;
1874
1875        if (get_user(nr_servers, ubufp))
1876                return -EFAULT;
1877
1878        pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
1879
1880        if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
1881                return -EINVAL;
1882
1883        mutex_lock(&xive->lock);
1884        if (xive->vp_base != XIVE_INVALID_VP)
1885                /* The VP block is allocated once and freed when the device
1886                 * is released. Better not allow its size to change since it is
1887                 * used by connect_vcpu to validate vCPU ids (e.g. setting it
1888                 * back to a higher value could allow connect_vcpu to come up
1889                 * with a VP id that goes beyond the VP block, which is likely
1890                 * to cause a crash in OPAL).
1891                 */
1892                rc = -EBUSY;
1893        else if (nr_servers > KVM_MAX_VCPUS)
1894                /* We don't need more servers. Higher vCPU ids get packed
1895                 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
1896                 */
1897                xive->nr_servers = KVM_MAX_VCPUS;
1898        else
1899                xive->nr_servers = nr_servers;
1900
1901        mutex_unlock(&xive->lock);
1902
1903        return rc;
1904}
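
    /*
     * Illustrative userspace usage of the control above (a sketch, not
     * taken from this file): the server count is passed as a u32 pointed
     * to by attr.addr on the KVM_DEV_XICS_GRP_CTRL group. "xics_fd" and
     * "nvcpus" below are hypothetical names.
     *
     *    __u32 nvcpus = 8;
     *    struct kvm_device_attr attr = {
     *            .group = KVM_DEV_XICS_GRP_CTRL,
     *            .attr  = KVM_DEV_XICS_NR_SERVERS,
     *            .addr  = (__u64)(uintptr_t)&nvcpus,
     *    };
     *    if (ioctl(xics_fd, KVM_SET_DEVICE_ATTR, &attr))
     *            err(1, "KVM_SET_DEVICE_ATTR");
     */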
1905
1906static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1907{
1908        struct kvmppc_xive *xive = dev->private;
1909
1910        /* We honor the existing XICS ioctl */
1911        switch (attr->group) {
1912        case KVM_DEV_XICS_GRP_SOURCES:
1913                return xive_set_source(xive, attr->attr, attr->addr);
1914        case KVM_DEV_XICS_GRP_CTRL:
1915                switch (attr->attr) {
1916                case KVM_DEV_XICS_NR_SERVERS:
1917                        return kvmppc_xive_set_nr_servers(xive, attr->addr);
1918                }
1919        }
1920        return -ENXIO;
1921}
1922
1923static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1924{
1925        struct kvmppc_xive *xive = dev->private;
1926
1927        /* We honor the existing XICS ioctl */
1928        switch (attr->group) {
1929        case KVM_DEV_XICS_GRP_SOURCES:
1930                return xive_get_source(xive, attr->attr, attr->addr);
1931        }
1932        return -ENXIO;
1933}
1934
1935static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1936{
1937        /* We honor the same limits as XICS, at least for now */
1938        switch (attr->group) {
1939        case KVM_DEV_XICS_GRP_SOURCES:
1940                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1941                    attr->attr < KVMPPC_XICS_NR_IRQS)
1942                        return 0;
1943                break;
1944        case KVM_DEV_XICS_GRP_CTRL:
1945                switch (attr->attr) {
1946                case KVM_DEV_XICS_NR_SERVERS:
1947                        return 0;
1948                }
1949        }
1950        return -ENXIO;
1951}
1952
1953static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1954{
1955        xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1956        xive_native_configure_irq(hw_num, 0, MASKED, 0);
1957}
1958
1959void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1960{
1961        int i;
1962
1963        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1964                struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1965
1966                if (!state->valid)
1967                        continue;
1968
1969                kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1970                xive_cleanup_irq_data(&state->ipi_data);
1971                xive_native_free_irq(state->ipi_number);
1972
1973                /* Pass-through interrupt: clean it up too, but keep the IRQ HW data */
1974                if (state->pt_number)
1975                        kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1976
1977                state->valid = false;
1978        }
1979}
1980
1981/*
1982 * Called when device fd is closed.  kvm->lock is held.
1983 */
1984static void kvmppc_xive_release(struct kvm_device *dev)
1985{
1986        struct kvmppc_xive *xive = dev->private;
1987        struct kvm *kvm = xive->kvm;
1988        struct kvm_vcpu *vcpu;
1989        int i;
1990
1991        pr_devel("Releasing xive device\n");
1992
1993        /*
1994         * Since this is the device release function, we know that
1995         * userspace does not have any open fd referring to the
1996         * device.  Therefore none of the device attribute set/get
1997         * functions can be executing concurrently, and similarly,
1998         * neither can the connect_vcpu and set/clr_mapped
1999         * functions.
2000         */
2001
2002        debugfs_remove(xive->dentry);
2003
2004        /*
2005         * We should clean up the vCPU interrupt presenters first.
2006         */
2007        kvm_for_each_vcpu(i, vcpu, kvm) {
2008                /*
2009                 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
2010                 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
2011                 * Holding the vcpu->mutex also means that the vcpu cannot
2012                 * be executing the KVM_RUN ioctl, and therefore it cannot
2013                 * be executing the XIVE push or pull code or accessing
2014                 * the XIVE MMIO regions.
2015                 */
2016                mutex_lock(&vcpu->mutex);
2017                kvmppc_xive_cleanup_vcpu(vcpu);
2018                mutex_unlock(&vcpu->mutex);
2019        }
2020
2021        /*
2022         * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
2023         * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
2024         * against xive code getting called during vcpu execution or
2025         * set/get one_reg operations.
2026         */
2027        kvm->arch.xive = NULL;
2028
2029        /* Mask and free interrupts */
2030        for (i = 0; i <= xive->max_sbid; i++) {
2031                if (xive->src_blocks[i])
2032                        kvmppc_xive_free_sources(xive->src_blocks[i]);
2033                kfree(xive->src_blocks[i]);
2034                xive->src_blocks[i] = NULL;
2035        }
2036
2037        if (xive->vp_base != XIVE_INVALID_VP)
2038                xive_native_free_vp_block(xive->vp_base);
2039
2040        /*
2041         * A reference to the kvmppc_xive pointer is now kept under
2042         * the xive_devices struct of the machine for reuse. For now it is
2043         * freed when the VM is destroyed, until we fix all the
2044         * execution paths.
2045         */
2046
2047        kfree(dev);
2048}
2049
2050/*
2051 * When the guest chooses the interrupt mode (XICS legacy or XIVE
2052 * native), the VM will switch KVM devices. The previous device will
2053 * be "released" before the new one is created.
2054 *
2055 * Until we are sure all execution paths are well protected, provide a
2056 * fail-safe (transitional) method for device destruction, in which
2057 * the XIVE device pointer is recycled and not directly freed.
2058 */
2059struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
2060{
2061        struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
2062                &kvm->arch.xive_devices.native :
2063                &kvm->arch.xive_devices.xics_on_xive;
2064        struct kvmppc_xive *xive = *kvm_xive_device;
2065
2066        if (!xive) {
2067                xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2068                *kvm_xive_device = xive;
2069        } else {
2070                memset(xive, 0, sizeof(*xive));
2071        }
2072
2073        return xive;
2074}
2075
2076/*
2077 * Create a XICS device with XIVE backend.  kvm->lock is held.
2078 */
2079static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2080{
2081        struct kvmppc_xive *xive;
2082        struct kvm *kvm = dev->kvm;
2083
2084        pr_devel("Creating xive for partition\n");
2085
2086        /* Already there? */
2087        if (kvm->arch.xive)
2088                return -EEXIST;
2089
2090        xive = kvmppc_xive_get_device(kvm, type);
2091        if (!xive)
2092                return -ENOMEM;
2093
2094        dev->private = xive;
2095        xive->dev = dev;
2096        xive->kvm = kvm;
2097        mutex_init(&xive->lock);
2098
2099        /* We use the default queue size set by the host */
2100        xive->q_order = xive_native_default_eq_shift();
2101        if (xive->q_order < PAGE_SHIFT)
2102                xive->q_page_order = 0;
2103        else
2104                xive->q_page_order = xive->q_order - PAGE_SHIFT;
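            /*
             * Worked example with illustrative numbers only: a 64kB default
             * EQ (q_order = 16) on a 64kB-page kernel (PAGE_SHIFT = 16)
             * gives q_page_order = 0 (one page per queue); on a 4kB-page
             * kernel (PAGE_SHIFT = 12) it gives q_page_order = 4 (16 pages).
             */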
2105
2106        /* VP allocation is delayed to the first call to connect_vcpu */
2107        xive->vp_base = XIVE_INVALID_VP;
2108        /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
2109         * on a POWER9 system.
2110         */
2111        xive->nr_servers = KVM_MAX_VCPUS;
2112
2113        xive->single_escalation = xive_native_has_single_escalation();
2114
2115        kvm->arch.xive = xive;
2116        return 0;
2117}
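
    /*
     * Illustrative userspace creation of this device (a sketch, assuming
     * the host routes KVM_DEV_TYPE_XICS to kvm_xive_ops when XIVE hardware
     * is in use; "vm_fd" below is a hypothetical name):
     *
     *    struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
     *    if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
     *            err(1, "KVM_CREATE_DEVICE");
     *    // cd.fd is the device fd used for the *_DEVICE_ATTR ioctls above
     */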
2118
2119int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2120{
2121        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2122        unsigned int i;
2123
2124        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2125                struct xive_q *q = &xc->queues[i];
2126                u32 i0, i1, idx;
2127
2128                if (!q->qpage && !xc->esc_virq[i])
2129                        continue;
2130
2131                seq_printf(m, " [q%d]: ", i);
2132
2133                if (q->qpage) {
2134                        idx = q->idx;
2135                        i0 = be32_to_cpup(q->qpage + idx);
2136                        idx = (idx + 1) & q->msk;
2137                        i1 = be32_to_cpup(q->qpage + idx);
2138                        seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2139                                   i0, i1);
2140                }
2141                if (xc->esc_virq[i]) {
2142                        struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2143                        struct xive_irq_data *xd =
2144                                irq_data_get_irq_handler_data(d);
2145                        u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2146
2147                        seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2148                                   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2149                                   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2150                                   xc->esc_virq[i], pq, xd->eoi_page);
2151                        seq_puts(m, "\n");
2152                }
2153        }
2154        return 0;
2155}
2156
2157static int xive_debug_show(struct seq_file *m, void *private)
2158{
2159        struct kvmppc_xive *xive = m->private;
2160        struct kvm *kvm = xive->kvm;
2161        struct kvm_vcpu *vcpu;
2162        u64 t_rm_h_xirr = 0;
2163        u64 t_rm_h_ipoll = 0;
2164        u64 t_rm_h_cppr = 0;
2165        u64 t_rm_h_eoi = 0;
2166        u64 t_rm_h_ipi = 0;
2167        u64 t_vm_h_xirr = 0;
2168        u64 t_vm_h_ipoll = 0;
2169        u64 t_vm_h_cppr = 0;
2170        u64 t_vm_h_eoi = 0;
2171        u64 t_vm_h_ipi = 0;
2172        unsigned int i;
2173
2174        if (!kvm)
2175                return 0;
2176
2177        seq_printf(m, "=========\nVCPU state\n=========\n");
2178
2179        kvm_for_each_vcpu(i, vcpu, kvm) {
2180                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2181
2182                if (!xc)
2183                        continue;
2184
2185                seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
2186                           " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2187                           xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
2188                           xc->mfrr, xc->pending,
2189                           xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2190
2191                kvmppc_xive_debug_show_queues(m, vcpu);
2192
2193                t_rm_h_xirr += xc->stat_rm_h_xirr;
2194                t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2195                t_rm_h_cppr += xc->stat_rm_h_cppr;
2196                t_rm_h_eoi += xc->stat_rm_h_eoi;
2197                t_rm_h_ipi += xc->stat_rm_h_ipi;
2198                t_vm_h_xirr += xc->stat_vm_h_xirr;
2199                t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2200                t_vm_h_cppr += xc->stat_vm_h_cppr;
2201                t_vm_h_eoi += xc->stat_vm_h_eoi;
2202                t_vm_h_ipi += xc->stat_vm_h_ipi;
2203        }
2204
2205        seq_printf(m, "Hcall totals\n");
2206        seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2207        seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2208        seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2209        seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2210        seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2211
2212        return 0;
2213}
2214
2215DEFINE_SHOW_ATTRIBUTE(xive_debug);
2216
2217static void xive_debugfs_init(struct kvmppc_xive *xive)
2218{
2219        char *name;
2220
2221        name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2222        if (!name) {
2223                pr_err("%s: no memory for name\n", __func__);
2224                return;
2225        }
2226
2227        xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2228                                           xive, &xive_debug_fops);
2229
2230        pr_debug("%s: created %s\n", __func__, name);
2231        kfree(name);
2232}
2233
2234static void kvmppc_xive_init(struct kvm_device *dev)
2235{
2236        struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2237
2238        /* Register some debug interfaces */
2239        xive_debugfs_init(xive);
2240}
2241
2242struct kvm_device_ops kvm_xive_ops = {
2243        .name = "kvm-xive",
2244        .create = kvmppc_xive_create,
2245        .init = kvmppc_xive_init,
2246        .release = kvmppc_xive_release,
2247        .set_attr = xive_set_attr,
2248        .get_attr = xive_get_attr,
2249        .has_attr = xive_has_attr,
2250};
2251
2252void kvmppc_xive_init_module(void)
2253{
2254        __xive_vm_h_xirr = xive_vm_h_xirr;
2255        __xive_vm_h_ipoll = xive_vm_h_ipoll;
2256        __xive_vm_h_ipi = xive_vm_h_ipi;
2257        __xive_vm_h_cppr = xive_vm_h_cppr;
2258        __xive_vm_h_eoi = xive_vm_h_eoi;
2259}
2260
2261void kvmppc_xive_exit_module(void)
2262{
2263        __xive_vm_h_xirr = NULL;
2264        __xive_vm_h_ipoll = NULL;
2265        __xive_vm_h_ipi = NULL;
2266        __xive_vm_h_cppr = NULL;
2267        __xive_vm_h_eoi = NULL;
2268}
2269