linux/arch/powerpc/kvm/book3s_xive.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
   4 */
   5
   6#define pr_fmt(fmt) "xive-kvm: " fmt
   7
   8#include <linux/kernel.h>
   9#include <linux/kvm_host.h>
  10#include <linux/err.h>
  11#include <linux/gfp.h>
  12#include <linux/spinlock.h>
  13#include <linux/delay.h>
  14#include <linux/percpu.h>
  15#include <linux/cpumask.h>
  16#include <linux/uaccess.h>
  17#include <linux/irqdomain.h>
  18#include <asm/kvm_book3s.h>
  19#include <asm/kvm_ppc.h>
  20#include <asm/hvcall.h>
  21#include <asm/xics.h>
  22#include <asm/xive.h>
  23#include <asm/xive-regs.h>
  24#include <asm/debug.h>
  25#include <asm/debugfs.h>
  26#include <asm/time.h>
  27#include <asm/opal.h>
  28
  29#include <linux/debugfs.h>
  30#include <linux/seq_file.h>
  31
  32#include "book3s_xive.h"
  33
  34
  35/*
  36 * Virtual mode variants of the hcalls for use on radix/radix
  37 * with AIL. They require the VCPU's VP to be "pushed"
  38 *
  39 * We still instantiate them here because we use some of the
  40 * generated utility functions as well in this file.
  41 */
  42#define XIVE_RUNTIME_CHECKS
  43#define X_PFX xive_vm_
  44#define X_STATIC static
  45#define X_STAT_PFX stat_vm_
  46#define __x_tima                xive_tima
  47#define __x_eoi_page(xd)        ((void __iomem *)((xd)->eoi_mmio))
  48#define __x_trig_page(xd)       ((void __iomem *)((xd)->trig_mmio))
  49#define __x_writeb      __raw_writeb
  50#define __x_readw       __raw_readw
  51#define __x_readq       __raw_readq
  52#define __x_writeq      __raw_writeq
  53
  54#include "book3s_xive_template.c"
  55
  56/*
  57 * We leave a gap of a couple of interrupts in the queue to
  58 * account for the IPI and additional safety guard.
  59 */
  60#define XIVE_Q_GAP      2
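/*
 * Illustrative example: with XIVE_Q_GAP = 2, a queue of, say, 1024 slots
 * (q->msk = 1023) lets xive_try_pick_queue() account for at most
 * (q->msk + 1) - XIVE_Q_GAP = 1022 routed interrupts before it returns
 * -EBUSY.
 */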
  61
  62/*
  63 * Push a vcpu's context to the XIVE on guest entry.
  64 * This assumes we are in virtual mode (MMU on)
  65 */
  66void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
  67{
  68        void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
  69        u64 pq;
  70
  71        /*
  72         * Nothing to do if the platform doesn't have a XIVE
  73         * or this vCPU doesn't have its own XIVE context
  74         * (e.g. because it's not using an in-kernel interrupt controller).
  75         */
  76        if (!tima || !vcpu->arch.xive_cam_word)
  77                return;
  78
  79        eieio();
  80        __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
  81        __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
  82        vcpu->arch.xive_pushed = 1;
  83        eieio();
  84
  85        /*
  86         * We clear the irq_pending flag. There is a small chance of a
  87         * race vs. the escalation interrupt happening on another
  88         * processor setting it again, but the only consequence is to
  89         * cause a spurious wakeup on the next H_CEDE, which is not an
  90         * issue.
  91         */
  92        vcpu->arch.irq_pending = 0;
  93
  94        /*
  95         * In single escalation mode, if the escalation interrupt is
  96         * on, we mask it.
  97         */
  98        if (vcpu->arch.xive_esc_on) {
  99                pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
 100                                                  XIVE_ESB_SET_PQ_01));
 101                mb();
 102
 103                /*
 104                 * We have a possible subtle race here: The escalation
 105                 * interrupt might have fired and be on its way to the
 106                 * host queue while we mask it, and if we unmask it
 107                 * early enough (re-cede right away), there is a
  108                 * theoretical possibility that it fires again, thus
 109                 * landing in the target queue more than once which is
 110                 * a big no-no.
 111                 *
 112                 * Fortunately, solving this is rather easy. If the
 113                 * above load setting PQ to 01 returns a previous
 114                 * value where P is set, then we know the escalation
 115                 * interrupt is somewhere on its way to the host. In
 116                 * that case we simply don't clear the xive_esc_on
 117                 * flag below. It will be eventually cleared by the
 118                 * handler for the escalation interrupt.
 119                 *
 120                 * Then, when doing a cede, we check that flag again
 121                 * before re-enabling the escalation interrupt, and if
 122                 * set, we abort the cede.
 123                 */
 124                if (!(pq & XIVE_ESB_VAL_P))
 125                        /* Now P is 0, we can clear the flag */
 126                        vcpu->arch.xive_esc_on = 0;
 127        }
 128}
 129EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
 130
 131/*
 132 * Pull a vcpu's context from the XIVE on guest exit.
 133 * This assumes we are in virtual mode (MMU on)
 134 */
 135void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
 136{
 137        void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
 138
 139        if (!vcpu->arch.xive_pushed)
 140                return;
 141
 142        /*
 143         * Should not have been pushed if there is no tima
 144         */
 145        if (WARN_ON(!tima))
 146                return;
 147
 148        eieio();
 149        /* First load to pull the context, we ignore the value */
 150        __raw_readl(tima + TM_SPC_PULL_OS_CTX);
 151        /* Second load to recover the context state (Words 0 and 1) */
 152        vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
 153
 154        /* Fixup some of the state for the next load */
 155        vcpu->arch.xive_saved_state.lsmfb = 0;
 156        vcpu->arch.xive_saved_state.ack = 0xff;
 157        vcpu->arch.xive_pushed = 0;
 158        eieio();
 159}
 160EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
 161
 162void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
 163{
 164        void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
 165
 166        if (!esc_vaddr)
 167                return;
 168
 169        /* we are using XIVE with single escalation */
 170
 171        if (vcpu->arch.xive_esc_on) {
 172                /*
 173                 * If we still have a pending escalation, abort the cede,
 174                 * and we must set PQ to 10 rather than 00 so that we don't
 175                 * potentially end up with two entries for the escalation
 176                 * interrupt in the XIVE interrupt queue.  In that case
 177                 * we also don't want to set xive_esc_on to 1 here in
 178                 * case we race with xive_esc_irq().
 179                 */
 180                vcpu->arch.ceded = 0;
 181                /*
 182                 * The escalation interrupts are special as we don't EOI them.
 183                 * There is no need to use the load-after-store ordering offset
 184                 * to set PQ to 10 as we won't use StoreEOI.
 185                 */
 186                __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
 187        } else {
 188                vcpu->arch.xive_esc_on = true;
 189                mb();
 190                __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
 191        }
 192        mb();
 193}
 194EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
 195
 196/*
 197 * This is a simple trigger for a generic XIVE IRQ. This must
 198 * only be called for interrupts that support a trigger page
 199 */
 200static bool xive_irq_trigger(struct xive_irq_data *xd)
 201{
 202        /* This should be only for MSIs */
 203        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
 204                return false;
 205
 206        /* Those interrupts should always have a trigger page */
 207        if (WARN_ON(!xd->trig_mmio))
 208                return false;
 209
 210        out_be64(xd->trig_mmio, 0);
 211
 212        return true;
 213}
 214
 215static irqreturn_t xive_esc_irq(int irq, void *data)
 216{
 217        struct kvm_vcpu *vcpu = data;
 218
 219        vcpu->arch.irq_pending = 1;
 220        smp_mb();
 221        if (vcpu->arch.ceded)
 222                kvmppc_fast_vcpu_kick(vcpu);
 223
 224        /* Since we have the no-EOI flag, the interrupt is effectively
 225         * disabled now. Clearing xive_esc_on means we won't bother
 226         * doing so on the next entry.
 227         *
 228         * This also allows the entry code to know that if a PQ combination
 229         * of 10 is observed while xive_esc_on is true, it means the queue
 230         * contains an unprocessed escalation interrupt. We don't make use of
  231         * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
 232         */
 233        vcpu->arch.xive_esc_on = false;
 234
 235        /* This orders xive_esc_on = false vs. subsequent stale_p = true */
 236        smp_wmb();      /* goes with smp_mb() in cleanup_single_escalation */
 237
 238        return IRQ_HANDLED;
 239}
 240
 241int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
 242                                  bool single_escalation)
 243{
 244        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 245        struct xive_q *q = &xc->queues[prio];
 246        char *name = NULL;
 247        int rc;
 248
 249        /* Already there ? */
 250        if (xc->esc_virq[prio])
 251                return 0;
 252
 253        /* Hook up the escalation interrupt */
 254        xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
 255        if (!xc->esc_virq[prio]) {
 256                pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
 257                       prio, xc->server_num);
 258                return -EIO;
 259        }
 260
 261        if (single_escalation)
 262                name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
 263                                 vcpu->kvm->arch.lpid, xc->server_num);
 264        else
 265                name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
 266                                 vcpu->kvm->arch.lpid, xc->server_num, prio);
 267        if (!name) {
 268                pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
 269                       prio, xc->server_num);
 270                rc = -ENOMEM;
 271                goto error;
 272        }
 273
 274        pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
 275
 276        rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
 277                         IRQF_NO_THREAD, name, vcpu);
 278        if (rc) {
 279                pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
 280                       prio, xc->server_num);
 281                goto error;
 282        }
 283        xc->esc_virq_names[prio] = name;
 284
 285        /* In single escalation mode, we grab the ESB MMIO of the
 286         * interrupt and mask it. Also populate the VCPU v/raddr
 287         * of the ESB page for use by asm entry/exit code. Finally
 288         * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the
 289         * core code from performing an EOI on the escalation
 290         * interrupt, thus leaving it effectively masked after
 291         * it fires once.
 292         */
 293        if (single_escalation) {
 294                struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
 295                struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
 296
 297                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
 298                vcpu->arch.xive_esc_raddr = xd->eoi_page;
 299                vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
 300                xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
 301        }
 302
 303        return 0;
 304error:
 305        irq_dispose_mapping(xc->esc_virq[prio]);
 306        xc->esc_virq[prio] = 0;
 307        kfree(name);
 308        return rc;
 309}
 310
 311static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
 312{
 313        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 314        struct kvmppc_xive *xive = xc->xive;
 315        struct xive_q *q =  &xc->queues[prio];
 316        void *qpage;
 317        int rc;
 318
 319        if (WARN_ON(q->qpage))
 320                return 0;
 321
  322        /* Allocate the queue and retrieve info on the current node for now */
 323        qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
 324        if (!qpage) {
 325                pr_err("Failed to allocate queue %d for VCPU %d\n",
 326                       prio, xc->server_num);
 327                return -ENOMEM;
 328        }
 329        memset(qpage, 0, 1 << xive->q_order);
 330
 331        /*
 332         * Reconfigure the queue. This will set q->qpage only once the
 333         * queue is fully configured. This is a requirement for prio 0
 334         * as we will stop doing EOIs for every IPI as soon as we observe
 335         * qpage being non-NULL, and instead will only EOI when we receive
 336         * corresponding queue 0 entries
 337         */
 338        rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
 339                                         xive->q_order, true);
 340        if (rc)
 341                pr_err("Failed to configure queue %d for VCPU %d\n",
 342                       prio, xc->server_num);
 343        return rc;
 344}
 345
 346/* Called with xive->lock held */
 347static int xive_check_provisioning(struct kvm *kvm, u8 prio)
 348{
 349        struct kvmppc_xive *xive = kvm->arch.xive;
 350        struct kvm_vcpu *vcpu;
 351        int i, rc;
 352
 353        lockdep_assert_held(&xive->lock);
 354
 355        /* Already provisioned ? */
 356        if (xive->qmap & (1 << prio))
 357                return 0;
 358
 359        pr_devel("Provisioning prio... %d\n", prio);
 360
 361        /* Provision each VCPU and enable escalations if needed */
 362        kvm_for_each_vcpu(i, vcpu, kvm) {
 363                if (!vcpu->arch.xive_vcpu)
 364                        continue;
 365                rc = xive_provision_queue(vcpu, prio);
 366                if (rc == 0 && !xive->single_escalation)
 367                        kvmppc_xive_attach_escalation(vcpu, prio,
 368                                                      xive->single_escalation);
 369                if (rc)
 370                        return rc;
 371        }
 372
 373        /* Order previous stores and mark it as provisioned */
 374        mb();
 375        xive->qmap |= (1 << prio);
 376        return 0;
 377}
 378
 379static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
 380{
 381        struct kvm_vcpu *vcpu;
 382        struct kvmppc_xive_vcpu *xc;
 383        struct xive_q *q;
 384
 385        /* Locate target server */
 386        vcpu = kvmppc_xive_find_server(kvm, server);
 387        if (!vcpu) {
 388                pr_warn("%s: Can't find server %d\n", __func__, server);
 389                return;
 390        }
 391        xc = vcpu->arch.xive_vcpu;
 392        if (WARN_ON(!xc))
 393                return;
 394
 395        q = &xc->queues[prio];
 396        atomic_inc(&q->pending_count);
 397}
 398
 399static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
 400{
 401        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 402        struct xive_q *q;
 403        u32 max;
 404
 405        if (WARN_ON(!xc))
 406                return -ENXIO;
 407        if (!xc->valid)
 408                return -ENXIO;
 409
 410        q = &xc->queues[prio];
 411        if (WARN_ON(!q->qpage))
 412                return -ENXIO;
 413
 414        /* Calculate max number of interrupts in that queue. */
 415        max = (q->msk + 1) - XIVE_Q_GAP;
 416        return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
 417}
 418
 419int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
 420{
 421        struct kvm_vcpu *vcpu;
 422        int i, rc;
 423
 424        /* Locate target server */
 425        vcpu = kvmppc_xive_find_server(kvm, *server);
 426        if (!vcpu) {
 427                pr_devel("Can't find server %d\n", *server);
 428                return -EINVAL;
 429        }
 430
 431        pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
 432
 433        /* Try pick it */
 434        rc = xive_try_pick_queue(vcpu, prio);
 435        if (rc == 0)
 436                return rc;
 437
 438        pr_devel(" .. failed, looking up candidate...\n");
 439
 440        /* Failed, pick another VCPU */
 441        kvm_for_each_vcpu(i, vcpu, kvm) {
 442                if (!vcpu->arch.xive_vcpu)
 443                        continue;
 444                rc = xive_try_pick_queue(vcpu, prio);
 445                if (rc == 0) {
 446                        *server = vcpu->arch.xive_vcpu->server_num;
 447                        pr_devel("  found on 0x%x/%d\n", *server, prio);
 448                        return rc;
 449                }
 450        }
 451        pr_devel("  no available target !\n");
 452
 453        /* No available target ! */
 454        return -EBUSY;
 455}
 456
 457static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
 458                             struct kvmppc_xive_src_block *sb,
 459                             struct kvmppc_xive_irq_state *state)
 460{
 461        struct xive_irq_data *xd;
 462        u32 hw_num;
 463        u8 old_prio;
 464        u64 val;
 465
 466        /*
 467         * Take the lock, set masked, try again if racing
 468         * with H_EOI
 469         */
 470        for (;;) {
 471                arch_spin_lock(&sb->lock);
 472                old_prio = state->guest_priority;
 473                state->guest_priority = MASKED;
 474                mb();
 475                if (!state->in_eoi)
 476                        break;
 477                state->guest_priority = old_prio;
 478                arch_spin_unlock(&sb->lock);
 479        }
 480
 481        /* No change ? Bail */
 482        if (old_prio == MASKED)
 483                return old_prio;
 484
 485        /* Get the right irq */
 486        kvmppc_xive_select_irq(state, &hw_num, &xd);
 487
 488        /* Set PQ to 10, return old P and old Q and remember them */
 489        val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
 490        state->old_p = !!(val & 2);
 491        state->old_q = !!(val & 1);
 492
 493        /*
  494         * Synchronize hardware to ensure the queues are updated when
 495         * masking
 496         */
 497        xive_native_sync_source(hw_num);
 498
 499        return old_prio;
 500}
 501
 502static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
 503                                 struct kvmppc_xive_irq_state *state)
 504{
 505        /*
  506         * Take the lock, try again if racing with H_EOI
 507         */
 508        for (;;) {
 509                arch_spin_lock(&sb->lock);
 510                if (!state->in_eoi)
 511                        break;
 512                arch_spin_unlock(&sb->lock);
 513        }
 514}
 515
 516static void xive_finish_unmask(struct kvmppc_xive *xive,
 517                               struct kvmppc_xive_src_block *sb,
 518                               struct kvmppc_xive_irq_state *state,
 519                               u8 prio)
 520{
 521        struct xive_irq_data *xd;
 522        u32 hw_num;
 523
 524        /* If we aren't changing a thing, move on */
 525        if (state->guest_priority != MASKED)
 526                goto bail;
 527
 528        /* Get the right irq */
 529        kvmppc_xive_select_irq(state, &hw_num, &xd);
 530
 531        /* Old Q set, set PQ to 11 */
 532        if (state->old_q)
 533                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
 534
 535        /*
  536         * If not old P, then perform an "effective" EOI
  537         * on the source. This will handle the cases where
 538         * FW EOI is needed.
 539         */
 540        if (!state->old_p)
 541                xive_vm_source_eoi(hw_num, xd);
 542
 543        /* Synchronize ordering and mark unmasked */
 544        mb();
 545bail:
 546        state->guest_priority = prio;
 547}
 548
 549/*
  550 * Target an interrupt to a given server/prio. This will fall back
  551 * to another server if necessary and perform the HW targetting
 552 * updates as needed
 553 *
 554 * NOTE: Must be called with the state lock held
 555 */
 556static int xive_target_interrupt(struct kvm *kvm,
 557                                 struct kvmppc_xive_irq_state *state,
 558                                 u32 server, u8 prio)
 559{
 560        struct kvmppc_xive *xive = kvm->arch.xive;
 561        u32 hw_num;
 562        int rc;
 563
 564        /*
 565         * This will return a tentative server and actual
 566         * priority. The count for that new target will have
 567         * already been incremented.
 568         */
 569        rc = kvmppc_xive_select_target(kvm, &server, prio);
 570
 571        /*
 572         * We failed to find a target ? Not much we can do
 573         * at least until we support the GIQ.
 574         */
 575        if (rc)
 576                return rc;
 577
 578        /*
 579         * Increment the old queue pending count if there
 580         * was one so that the old queue count gets adjusted later
 581         * when observed to be empty.
 582         */
 583        if (state->act_priority != MASKED)
 584                xive_inc_q_pending(kvm,
 585                                   state->act_server,
 586                                   state->act_priority);
 587        /*
 588         * Update state and HW
 589         */
 590        state->act_priority = prio;
 591        state->act_server = server;
 592
 593        /* Get the right irq */
 594        kvmppc_xive_select_irq(state, &hw_num, NULL);
 595
 596        return xive_native_configure_irq(hw_num,
 597                                         kvmppc_xive_vp(xive, server),
 598                                         prio, state->number);
 599}
 600
 601/*
 602 * Targetting rules: In order to avoid losing track of
  603 * pending interrupts across mask and unmask, which would
 604 * allow queue overflows, we implement the following rules:
 605 *
 606 *  - Unless it was never enabled (or we run out of capacity)
 607 *    an interrupt is always targetted at a valid server/queue
 608 *    pair even when "masked" by the guest. This pair tends to
 609 *    be the last one used but it can be changed under some
 610 *    circumstances. That allows us to separate targetting
 611 *    from masking, we only handle accounting during (re)targetting,
 612 *    this also allows us to let an interrupt drain into its target
 613 *    queue after masking, avoiding complex schemes to remove
 614 *    interrupts out of remote processor queues.
 615 *
 616 *  - When masking, we set PQ to 10 and save the previous value
 617 *    of P and Q.
 618 *
 619 *  - When unmasking, if saved Q was set, we set PQ to 11
 620 *    otherwise we leave PQ to the HW state which will be either
 621 *    10 if nothing happened or 11 if the interrupt fired while
 622 *    masked. Effectively we are OR'ing the previous Q into the
 623 *    HW Q.
 624 *
 625 *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 626 *    which will unmask the interrupt and shoot a new one if Q was
 627 *    set.
 628 *
 629 *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 630 *    effectively meaning an H_EOI from the guest is still expected
 631 *    for that interrupt).
 632 *
 633 *  - If H_EOI occurs while masked, we clear the saved P.
 634 *
 635 *  - When changing target, we account on the new target and
 636 *    increment a separate "pending" counter on the old one.
 637 *    This pending counter will be used to decrement the old
 638 *    target's count when its queue has been observed empty.
 639 */
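/*
 * In short, the PQ transitions used below are:
 *
 *   mask (xive_lock_and_mask):    PQ <- 10, previous P/Q saved in old_p/old_q
 *   unmask (xive_finish_unmask):  if old_q was set, PQ <- 11;
 *                                 if old_p was clear, EOI the source, which
 *                                 unmasks it and re-triggers if Q was set;
 *                                 otherwise leave PQ alone and wait for the
 *                                 guest H_EOI.
 */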
 640
 641int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
 642                         u32 priority)
 643{
 644        struct kvmppc_xive *xive = kvm->arch.xive;
 645        struct kvmppc_xive_src_block *sb;
 646        struct kvmppc_xive_irq_state *state;
 647        u8 new_act_prio;
 648        int rc = 0;
 649        u16 idx;
 650
 651        if (!xive)
 652                return -ENODEV;
 653
 654        pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
 655                 irq, server, priority);
 656
 657        /* First, check provisioning of queues */
 658        if (priority != MASKED) {
 659                mutex_lock(&xive->lock);
 660                rc = xive_check_provisioning(xive->kvm,
 661                              xive_prio_from_guest(priority));
 662                mutex_unlock(&xive->lock);
 663        }
 664        if (rc) {
 665                pr_devel("  provisioning failure %d !\n", rc);
 666                return rc;
 667        }
 668
 669        sb = kvmppc_xive_find_source(xive, irq, &idx);
 670        if (!sb)
 671                return -EINVAL;
 672        state = &sb->irq_state[idx];
 673
 674        /*
 675         * We first handle masking/unmasking since the locking
 676         * might need to be retried due to EOIs, we'll handle
 677         * targetting changes later. These functions will return
 678         * with the SB lock held.
 679         *
 680         * xive_lock_and_mask() will also set state->guest_priority
 681         * but won't otherwise change other fields of the state.
 682         *
 683         * xive_lock_for_unmask will not actually unmask, this will
 684         * be done later by xive_finish_unmask() once the targetting
 685         * has been done, so we don't try to unmask an interrupt
 686         * that hasn't yet been targetted.
 687         */
 688        if (priority == MASKED)
 689                xive_lock_and_mask(xive, sb, state);
 690        else
 691                xive_lock_for_unmask(sb, state);
 692
 693
 694        /*
 695         * Then we handle targetting.
 696         *
 697         * First calculate a new "actual priority"
 698         */
 699        new_act_prio = state->act_priority;
 700        if (priority != MASKED)
 701                new_act_prio = xive_prio_from_guest(priority);
 702
 703        pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
 704                 new_act_prio, state->act_server, state->act_priority);
 705
 706        /*
 707         * Then check if we actually need to change anything,
 708         *
 709         * The condition for re-targetting the interrupt is that
 710         * we have a valid new priority (new_act_prio is not 0xff)
 711         * and either the server or the priority changed.
 712         *
 713         * Note: If act_priority was ff and the new priority is
 714         *       also ff, we don't do anything and leave the interrupt
  715         *       untargetted. An attempt to do an int_on on an
  716         *       untargetted interrupt will fail. If that is a problem
  717         *       we could initialize interrupts with a valid default
 718         */
 719
 720        if (new_act_prio != MASKED &&
 721            (state->act_server != server ||
 722             state->act_priority != new_act_prio))
 723                rc = xive_target_interrupt(kvm, state, server, new_act_prio);
 724
 725        /*
 726         * Perform the final unmasking of the interrupt source
 727         * if necessary
 728         */
 729        if (priority != MASKED)
 730                xive_finish_unmask(xive, sb, state, priority);
 731
 732        /*
 733         * Finally Update saved_priority to match. Only int_on/off
 734         * set this field to a different value.
 735         */
 736        state->saved_priority = priority;
 737
 738        arch_spin_unlock(&sb->lock);
 739        return rc;
 740}
 741
 742int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 743                         u32 *priority)
 744{
 745        struct kvmppc_xive *xive = kvm->arch.xive;
 746        struct kvmppc_xive_src_block *sb;
 747        struct kvmppc_xive_irq_state *state;
 748        u16 idx;
 749
 750        if (!xive)
 751                return -ENODEV;
 752
 753        sb = kvmppc_xive_find_source(xive, irq, &idx);
 754        if (!sb)
 755                return -EINVAL;
 756        state = &sb->irq_state[idx];
 757        arch_spin_lock(&sb->lock);
 758        *server = state->act_server;
 759        *priority = state->guest_priority;
 760        arch_spin_unlock(&sb->lock);
 761
 762        return 0;
 763}
 764
 765int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
 766{
 767        struct kvmppc_xive *xive = kvm->arch.xive;
 768        struct kvmppc_xive_src_block *sb;
 769        struct kvmppc_xive_irq_state *state;
 770        u16 idx;
 771
 772        if (!xive)
 773                return -ENODEV;
 774
 775        sb = kvmppc_xive_find_source(xive, irq, &idx);
 776        if (!sb)
 777                return -EINVAL;
 778        state = &sb->irq_state[idx];
 779
 780        pr_devel("int_on(irq=0x%x)\n", irq);
 781
 782        /*
 783         * Check if interrupt was not targetted
 784         */
 785        if (state->act_priority == MASKED) {
 786                pr_devel("int_on on untargetted interrupt\n");
 787                return -EINVAL;
 788        }
 789
 790        /* If saved_priority is 0xff, do nothing */
 791        if (state->saved_priority == MASKED)
 792                return 0;
 793
 794        /*
 795         * Lock and unmask it.
 796         */
 797        xive_lock_for_unmask(sb, state);
 798        xive_finish_unmask(xive, sb, state, state->saved_priority);
 799        arch_spin_unlock(&sb->lock);
 800
 801        return 0;
 802}
 803
 804int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
 805{
 806        struct kvmppc_xive *xive = kvm->arch.xive;
 807        struct kvmppc_xive_src_block *sb;
 808        struct kvmppc_xive_irq_state *state;
 809        u16 idx;
 810
 811        if (!xive)
 812                return -ENODEV;
 813
 814        sb = kvmppc_xive_find_source(xive, irq, &idx);
 815        if (!sb)
 816                return -EINVAL;
 817        state = &sb->irq_state[idx];
 818
 819        pr_devel("int_off(irq=0x%x)\n", irq);
 820
 821        /*
 822         * Lock and mask
 823         */
 824        state->saved_priority = xive_lock_and_mask(xive, sb, state);
 825        arch_spin_unlock(&sb->lock);
 826
 827        return 0;
 828}
 829
 830static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
 831{
 832        struct kvmppc_xive_src_block *sb;
 833        struct kvmppc_xive_irq_state *state;
 834        u16 idx;
 835
 836        sb = kvmppc_xive_find_source(xive, irq, &idx);
 837        if (!sb)
 838                return false;
 839        state = &sb->irq_state[idx];
 840        if (!state->valid)
 841                return false;
 842
 843        /*
 844         * Trigger the IPI. This assumes we never restore a pass-through
 845         * interrupt which should be safe enough
 846         */
 847        xive_irq_trigger(&state->ipi_data);
 848
 849        return true;
 850}
 851
 852u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 853{
 854        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 855
 856        if (!xc)
 857                return 0;
 858
 859        /* Return the per-cpu state for state saving/migration */
 860        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
 861               (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
 862               (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 863}
 864
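/*
 * Sketch of the ONE_REG ICP word handled by kvmppc_xive_get_icp() and
 * kvmppc_xive_set_icp() (field positions are the KVM_REG_PPC_ICP_* shifts
 * and masks from the KVM uapi header):
 *
 *   CPPR - current processor priority
 *   XISR - pending interrupt source (consumed on set, not reported on get)
 *   MFRR - most favoured request register
 *   PPRI - pending priority (reported as 0xff on get, ignored on set)
 */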
 865int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
 866{
 867        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
 868        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
 869        u8 cppr, mfrr;
 870        u32 xisr;
 871
 872        if (!xc || !xive)
 873                return -ENOENT;
 874
 875        /* Grab individual state fields. We don't use pending_pri */
 876        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
 877        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
 878                KVM_REG_PPC_ICP_XISR_MASK;
 879        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
 880
 881        pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
 882                 xc->server_num, cppr, mfrr, xisr);
 883
 884        /*
 885         * We can't update the state of a "pushed" VCPU, but that
 886         * shouldn't happen because the vcpu->mutex makes running a
 887         * vcpu mutually exclusive with doing one_reg get/set on it.
 888         */
 889        if (WARN_ON(vcpu->arch.xive_pushed))
 890                return -EIO;
 891
 892        /* Update VCPU HW saved state */
 893        vcpu->arch.xive_saved_state.cppr = cppr;
 894        xc->hw_cppr = xc->cppr = cppr;
 895
 896        /*
 897         * Update MFRR state. If it's not 0xff, we mark the VCPU as
 898         * having a pending MFRR change, which will re-evaluate the
 899         * target. The VCPU will thus potentially get a spurious
 900         * interrupt but that's not a big deal.
 901         */
 902        xc->mfrr = mfrr;
 903        if (mfrr < cppr)
 904                xive_irq_trigger(&xc->vp_ipi_data);
 905
 906        /*
 907         * Now saved XIRR is "interesting". It means there's something in
 908         * the legacy "1 element" queue... for an IPI we simply ignore it,
 909         * as the MFRR restore will handle that. For anything else we need
 910         * to force a resend of the source.
  911         * However the source may not have been set up yet. If that's the
 912         * case, we keep that info and increment a counter in the xive to
 913         * tell subsequent xive_set_source() to go look.
 914         */
 915        if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
 916                xc->delayed_irq = xisr;
 917                xive->delayed_irqs++;
 918                pr_devel("  xisr restore delayed\n");
 919        }
 920
 921        return 0;
 922}
 923
 924int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
 925                           struct irq_desc *host_desc)
 926{
 927        struct kvmppc_xive *xive = kvm->arch.xive;
 928        struct kvmppc_xive_src_block *sb;
 929        struct kvmppc_xive_irq_state *state;
 930        struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
 931        unsigned int host_irq = irq_desc_get_irq(host_desc);
 932        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
 933        u16 idx;
 934        u8 prio;
 935        int rc;
 936
 937        if (!xive)
 938                return -ENODEV;
 939
  940        pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
 941
 942        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
 943        if (!sb)
 944                return -EINVAL;
 945        state = &sb->irq_state[idx];
 946
 947        /*
 948         * Mark the passed-through interrupt as going to a VCPU,
 949         * this will prevent further EOIs and similar operations
 950         * from the XIVE code. It will also mask the interrupt
 951         * to either PQ=10 or 11 state, the latter if the interrupt
 952         * is pending. This will allow us to unmask or retrigger it
 953         * after routing it to the guest with a simple EOI.
 954         *
 955         * The "state" argument is a "token", all it needs is to be
 956         * non-NULL to switch to passed-through or NULL for the
 957         * other way around. We may not yet have an actual VCPU
 958         * target here and we don't really care.
 959         */
 960        rc = irq_set_vcpu_affinity(host_irq, state);
 961        if (rc) {
 962                pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
 963                return rc;
 964        }
 965
 966        /*
 967         * Mask and read state of IPI. We need to know if its P bit
 968         * is set as that means it's potentially already using a
 969         * queue entry in the target
 970         */
 971        prio = xive_lock_and_mask(xive, sb, state);
 972        pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
 973                 state->old_p, state->old_q);
 974
 975        /* Turn the IPI hard off */
 976        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
 977
 978        /*
 979         * Reset ESB guest mapping. Needed when ESB pages are exposed
 980         * to the guest in XIVE native mode
 981         */
 982        if (xive->ops && xive->ops->reset_mapped)
 983                xive->ops->reset_mapped(kvm, guest_irq);
 984
 985        /* Grab info about irq */
 986        state->pt_number = hw_irq;
 987        state->pt_data = irq_data_get_irq_handler_data(host_data);
 988
 989        /*
 990         * Configure the IRQ to match the existing configuration of
 991         * the IPI if it was already targetted. Otherwise this will
 992         * mask the interrupt in a lossy way (act_priority is 0xff)
 993         * which is fine for a never started interrupt.
 994         */
 995        xive_native_configure_irq(hw_irq,
 996                                  kvmppc_xive_vp(xive, state->act_server),
 997                                  state->act_priority, state->number);
 998
 999        /*
1000         * We do an EOI to enable the interrupt (and retrigger if needed)
1001         * if the guest has the interrupt unmasked and the P bit was *not*
1002         * set in the IPI. If it was set, we know a slot may still be in
1003         * use in the target queue thus we have to wait for a guest
1004         * originated EOI
1005         */
1006        if (prio != MASKED && !state->old_p)
1007                xive_vm_source_eoi(hw_irq, state->pt_data);
1008
1009        /* Clear old_p/old_q as they are no longer relevant */
1010        state->old_p = state->old_q = false;
1011
1012        /* Restore guest prio (unlocks EOI) */
1013        mb();
1014        state->guest_priority = prio;
1015        arch_spin_unlock(&sb->lock);
1016
1017        return 0;
1018}
1019EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
1020
1021int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
1022                           struct irq_desc *host_desc)
1023{
1024        struct kvmppc_xive *xive = kvm->arch.xive;
1025        struct kvmppc_xive_src_block *sb;
1026        struct kvmppc_xive_irq_state *state;
1027        unsigned int host_irq = irq_desc_get_irq(host_desc);
1028        u16 idx;
1029        u8 prio;
1030        int rc;
1031
1032        if (!xive)
1033                return -ENODEV;
1034
1035        pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
1036
1037        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1038        if (!sb)
1039                return -EINVAL;
1040        state = &sb->irq_state[idx];
1041
1042        /*
1043         * Mask and read state of IRQ. We need to know if its P bit
1044         * is set as that means it's potentially already using a
1045         * queue entry in the target
1046         */
1047        prio = xive_lock_and_mask(xive, sb, state);
1048        pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1049                 state->old_p, state->old_q);
1050
1051        /*
1052         * If old_p is set, the interrupt is pending, we switch it to
1053         * PQ=11. This will force a resend in the host so the interrupt
 1054         * isn't lost to whatever host driver may pick it up
1055         */
1056        if (state->old_p)
1057                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1058
1059        /* Release the passed-through interrupt to the host */
1060        rc = irq_set_vcpu_affinity(host_irq, NULL);
1061        if (rc) {
1062                pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1063                return rc;
1064        }
1065
1066        /* Forget about the IRQ */
1067        state->pt_number = 0;
1068        state->pt_data = NULL;
1069
1070        /*
1071         * Reset ESB guest mapping. Needed when ESB pages are exposed
1072         * to the guest in XIVE native mode
1073         */
1074        if (xive->ops && xive->ops->reset_mapped) {
1075                xive->ops->reset_mapped(kvm, guest_irq);
1076        }
1077
1078        /* Reconfigure the IPI */
1079        xive_native_configure_irq(state->ipi_number,
1080                                  kvmppc_xive_vp(xive, state->act_server),
1081                                  state->act_priority, state->number);
1082
1083        /*
1084         * If old_p is set (we have a queue entry potentially
1085         * occupied) or the interrupt is masked, we set the IPI
1086         * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
1087         */
1088        if (prio == MASKED || state->old_p)
1089                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1090        else
1091                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1092
1093        /* Restore guest prio (unlocks EOI) */
1094        mb();
1095        state->guest_priority = prio;
1096        arch_spin_unlock(&sb->lock);
1097
1098        return 0;
1099}
1100EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1101
1102void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1103{
1104        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1105        struct kvm *kvm = vcpu->kvm;
1106        struct kvmppc_xive *xive = kvm->arch.xive;
1107        int i, j;
1108
1109        for (i = 0; i <= xive->max_sbid; i++) {
1110                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1111
1112                if (!sb)
1113                        continue;
1114                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1115                        struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1116
1117                        if (!state->valid)
1118                                continue;
1119                        if (state->act_priority == MASKED)
1120                                continue;
1121                        if (state->act_server != xc->server_num)
1122                                continue;
1123
1124                        /* Clean it up */
1125                        arch_spin_lock(&sb->lock);
1126                        state->act_priority = MASKED;
1127                        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1128                        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1129                        if (state->pt_number) {
1130                                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1131                                xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1132                        }
1133                        arch_spin_unlock(&sb->lock);
1134                }
1135        }
1136
1137        /* Disable vcpu's escalation interrupt */
1138        if (vcpu->arch.xive_esc_on) {
1139                __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1140                                             XIVE_ESB_SET_PQ_01));
1141                vcpu->arch.xive_esc_on = false;
1142        }
1143
1144        /*
1145         * Clear pointers to escalation interrupt ESB.
1146         * This is safe because the vcpu->mutex is held, preventing
1147         * any other CPU from concurrently executing a KVM_RUN ioctl.
1148         */
1149        vcpu->arch.xive_esc_vaddr = 0;
1150        vcpu->arch.xive_esc_raddr = 0;
1151}
1152
1153/*
1154 * In single escalation mode, the escalation interrupt is marked so
1155 * that EOI doesn't re-enable it, but just sets the stale_p flag to
1156 * indicate that the P bit has already been dealt with.  However, the
1157 * assembly code that enters the guest sets PQ to 00 without clearing
1158 * stale_p (because it has no easy way to address it).  Hence we have
1159 * to adjust stale_p before shutting down the interrupt.
1160 */
1161void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
1162                                    struct kvmppc_xive_vcpu *xc, int irq)
1163{
1164        struct irq_data *d = irq_get_irq_data(irq);
1165        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1166
1167        /*
1168         * This slightly odd sequence gives the right result
1169         * (i.e. stale_p set if xive_esc_on is false) even if
1170         * we race with xive_esc_irq() and xive_irq_eoi().
1171         */
1172        xd->stale_p = false;
 1173        smp_mb();               /* paired with smp_wmb in xive_esc_irq */
1174        if (!vcpu->arch.xive_esc_on)
1175                xd->stale_p = true;
1176}
1177
1178void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1179{
1180        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1181        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1182        int i;
1183
1184        if (!kvmppc_xics_enabled(vcpu))
1185                return;
1186
1187        if (!xc)
1188                return;
1189
1190        pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1191
1192        /* Ensure no interrupt is still routed to that VP */
1193        xc->valid = false;
1194        kvmppc_xive_disable_vcpu_interrupts(vcpu);
1195
1196        /* Mask the VP IPI */
1197        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1198
1199        /* Free escalations */
1200        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1201                if (xc->esc_virq[i]) {
1202                        if (xc->xive->single_escalation)
1203                                xive_cleanup_single_escalation(vcpu, xc,
1204                                                        xc->esc_virq[i]);
1205                        free_irq(xc->esc_virq[i], vcpu);
1206                        irq_dispose_mapping(xc->esc_virq[i]);
1207                        kfree(xc->esc_virq_names[i]);
1208                }
1209        }
1210
1211        /* Disable the VP */
1212        xive_native_disable_vp(xc->vp_id);
1213
1214        /* Clear the cam word so guest entry won't try to push context */
1215        vcpu->arch.xive_cam_word = 0;
1216
1217        /* Free the queues */
1218        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1219                struct xive_q *q = &xc->queues[i];
1220
1221                xive_native_disable_queue(xc->vp_id, q, i);
1222                if (q->qpage) {
1223                        free_pages((unsigned long)q->qpage,
1224                                   xive->q_page_order);
1225                        q->qpage = NULL;
1226                }
1227        }
1228
1229        /* Free the IPI */
1230        if (xc->vp_ipi) {
1231                xive_cleanup_irq_data(&xc->vp_ipi_data);
1232                xive_native_free_irq(xc->vp_ipi);
1233        }
1234        /* Free the VP */
1235        kfree(xc);
1236
1237        /* Cleanup the vcpu */
1238        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1239        vcpu->arch.xive_vcpu = NULL;
1240}
1241
1242static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1243{
1244        /* We have a block of xive->nr_servers VPs. We just need to check
1245         * packed vCPU ids are below that.
1246         */
1247        return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
1248}
1249
1250int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1251{
1252        u32 vp_id;
1253
1254        if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1255                pr_devel("Out of bounds !\n");
1256                return -EINVAL;
1257        }
1258
1259        if (xive->vp_base == XIVE_INVALID_VP) {
1260                xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1261                pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1262
1263                if (xive->vp_base == XIVE_INVALID_VP)
1264                        return -ENOSPC;
1265        }
1266
1267        vp_id = kvmppc_xive_vp(xive, cpu);
1268        if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1269                pr_devel("Duplicate !\n");
1270                return -EEXIST;
1271        }
1272
1273        *vp = vp_id;
1274
1275        return 0;
1276}
1277
1278int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1279                             struct kvm_vcpu *vcpu, u32 cpu)
1280{
1281        struct kvmppc_xive *xive = dev->private;
1282        struct kvmppc_xive_vcpu *xc;
1283        int i, r = -EBUSY;
1284        u32 vp_id;
1285
1286        pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1287
1288        if (dev->ops != &kvm_xive_ops) {
1289                pr_devel("Wrong ops !\n");
1290                return -EPERM;
1291        }
1292        if (xive->kvm != vcpu->kvm)
1293                return -EPERM;
1294        if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1295                return -EBUSY;
1296
1297        /* We need to synchronize with queue provisioning */
1298        mutex_lock(&xive->lock);
1299
1300        r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1301        if (r)
1302                goto bail;
1303
1304        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1305        if (!xc) {
1306                r = -ENOMEM;
1307                goto bail;
1308        }
1309
1310        vcpu->arch.xive_vcpu = xc;
1311        xc->xive = xive;
1312        xc->vcpu = vcpu;
1313        xc->server_num = cpu;
1314        xc->vp_id = vp_id;
1315        xc->mfrr = 0xff;
1316        xc->valid = true;
1317
1318        r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1319        if (r)
1320                goto bail;
1321
1322        /* Configure VCPU fields for use by assembly push/pull */
1323        vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1324        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1325
1326        /* Allocate IPI */
1327        xc->vp_ipi = xive_native_alloc_irq();
1328        if (!xc->vp_ipi) {
1329                pr_err("Failed to allocate xive irq for VCPU IPI\n");
1330                r = -EIO;
1331                goto bail;
1332        }
1333        pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1334
1335        r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1336        if (r)
1337                goto bail;
1338
1339        /*
1340         * Enable the VP first as the single escalation mode will
 1341         * affect the numbering of escalation interrupts
1342         */
1343        r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1344        if (r) {
1345                pr_err("Failed to enable VP in OPAL, err %d\n", r);
1346                goto bail;
1347        }
1348
1349        /*
1350         * Initialize queues. Initially we set them all for no queueing
1351         * and we enable escalation for queue 0 only which we'll use for
1352         * our mfrr change notifications. If the VCPU is hot-plugged, we
 1353         * do, however, handle provisioning based on the existing "map"
1354         * of enabled queues.
1355         */
1356        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1357                struct xive_q *q = &xc->queues[i];
1358
1359                /* Single escalation, no queue 7 */
1360                if (i == 7 && xive->single_escalation)
1361                        break;
1362
1363                /* Is queue already enabled ? Provision it */
1364                if (xive->qmap & (1 << i)) {
1365                        r = xive_provision_queue(vcpu, i);
1366                        if (r == 0 && !xive->single_escalation)
1367                                kvmppc_xive_attach_escalation(
1368                                        vcpu, i, xive->single_escalation);
1369                        if (r)
1370                                goto bail;
1371                } else {
1372                        r = xive_native_configure_queue(xc->vp_id,
1373                                                        q, i, NULL, 0, true);
1374                        if (r) {
1375                                pr_err("Failed to configure queue %d for VCPU %d\n",
1376                                       i, cpu);
1377                                goto bail;
1378                        }
1379                }
1380        }
1381
1382        /* If not done above, attach priority 0 escalation */
1383        r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1384        if (r)
1385                goto bail;
1386
1387        /* Route the IPI */
1388        r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1389        if (!r)
1390                xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1391
1392bail:
1393        mutex_unlock(&xive->lock);
1394        if (r) {
1395                kvmppc_xive_cleanup_vcpu(vcpu);
1396                return r;
1397        }
1398
1399        vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1400        return 0;
1401}
1402
1403/*
1404 * Scanning of queues before/after migration save
1405 */
1406static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1407{
1408        struct kvmppc_xive_src_block *sb;
1409        struct kvmppc_xive_irq_state *state;
1410        u16 idx;
1411
1412        sb = kvmppc_xive_find_source(xive, irq, &idx);
1413        if (!sb)
1414                return;
1415
1416        state = &sb->irq_state[idx];
1417
1418        /* Some sanity checking */
1419        if (!state->valid) {
1420                pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1421                return;
1422        }
1423
1424        /*
1425         * If the interrupt is in a queue it should have P set.
 1426         * We warn so that it gets reported. A backtrace isn't useful
1427         * so no need to use a WARN_ON.
1428         */
1429        if (!state->saved_p)
1430                pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1431
1432        /* Set flag */
1433        state->in_queue = true;
1434}
1435
1436static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1437                                   struct kvmppc_xive_src_block *sb,
1438                                   u32 irq)
1439{
1440        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1441
1442        if (!state->valid)
1443                return;
1444
1445        /* Mask and save state, this will also sync HW queues */
1446        state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1447
1448        /* Transfer P and Q */
1449        state->saved_p = state->old_p;
1450        state->saved_q = state->old_q;
1451
1452        /* Unlock */
1453        arch_spin_unlock(&sb->lock);
1454}
1455
1456static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1457                                     struct kvmppc_xive_src_block *sb,
1458                                     u32 irq)
1459{
1460        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1461
1462        if (!state->valid)
1463                return;
1464
1465        /*
 1466         * Lock / exclude EOI (not technically necessary if the
 1467         * guest isn't running concurrently). If this becomes a
 1468         * performance issue we can probably remove the lock.
1469         */
1470        xive_lock_for_unmask(sb, state);
1471
1472        /* Restore mask/prio if it wasn't masked */
1473        if (state->saved_scan_prio != MASKED)
1474                xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1475
1476        /* Unlock */
1477        arch_spin_unlock(&sb->lock);
1478}
1479
1480static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1481{
1482        u32 idx = q->idx;
1483        u32 toggle = q->toggle;
1484        u32 irq;
1485
1486        do {
1487                irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1488                if (irq > XICS_IPI)
1489                        xive_pre_save_set_queued(xive, irq);
 1490        } while (irq);
1491}
1492
1493static void xive_pre_save_scan(struct kvmppc_xive *xive)
1494{
1495        struct kvm_vcpu *vcpu = NULL;
1496        int i, j;
1497
1498        /*
1499         * See comment in xive_get_source() about how this
1500         * work. Collect a stable state for all interrupts
 1501         * works. Collect a stable state for all interrupts
1502        for (i = 0; i <= xive->max_sbid; i++) {
1503                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1504                if (!sb)
1505                        continue;
1506                for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1507                        xive_pre_save_mask_irq(xive, sb, j);
1508        }
1509
1510        /* Then scan the queues and update the "in_queue" flag */
1511        kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1512                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1513                if (!xc)
1514                        continue;
1515                for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1516                        if (xc->queues[j].qpage)
1517                                xive_pre_save_queue(xive, &xc->queues[j]);
1518                }
1519        }
1520
1521        /* Finally restore interrupt states */
1522        for (i = 0; i <= xive->max_sbid; i++) {
1523                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1524                if (!sb)
1525                        continue;
1526                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1527                        xive_pre_save_unmask_irq(xive, sb, j);
1528        }
1529}
1530
1531static void xive_post_save_scan(struct kvmppc_xive *xive)
1532{
1533        u32 i, j;
1534
1535        /* Clear all the in_queue flags */
1536        for (i = 0; i <= xive->max_sbid; i++) {
1537                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1538                if (!sb)
1539                        continue;
1540                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1541                        sb->irq_state[j].in_queue = false;
1542        }
1543
1544        /* Next get_source() will do a new scan */
1545        xive->saved_src_count = 0;
1546}
1547
1548/*
1549 * This returns the source configuration and state to user space.
1550 */
1551static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1552{
1553        struct kvmppc_xive_src_block *sb;
1554        struct kvmppc_xive_irq_state *state;
1555        u64 __user *ubufp = (u64 __user *) addr;
1556        u64 val, prio;
1557        u16 idx;
1558
1559        sb = kvmppc_xive_find_source(xive, irq, &idx);
1560        if (!sb)
1561                return -ENOENT;
1562
1563        state = &sb->irq_state[idx];
1564
1565        if (!state->valid)
1566                return -ENOENT;
1567
1568        pr_devel("get_source(%ld)...\n", irq);
1569
1570        /*
1571         * To properly save the state into something that looks like a
1572         * XICS migration stream, we cannot treat interrupts individually.
1573         *
1574         * We need, instead, to mask them all (and save their previous PQ state)
1575         * to get a stable state in the HW, then sync them to ensure that
1576         * any interrupt that had already fired hits its queue, and finally
1577         * scan all the queues to collect which interrupts are still present
1578         * in the queues, so we can set the "pending" flag on them and
1579         * they can be resent on restore.
1580         *
1581         * So we do it all when the "first" interrupt gets saved: all the
1582         * state is collected at that point, and subsequent calls to
1583         * xive_get_source() merely convert that state to the expected
1584         * userspace bit mask.
1585         */
1586        if (xive->saved_src_count == 0)
1587                xive_pre_save_scan(xive);
1588        xive->saved_src_count++;
1589
1590        /* Convert saved state into something compatible with xics */
1591        val = state->act_server;
1592        prio = state->saved_scan_prio;
1593
1594        if (prio == MASKED) {
1595                val |= KVM_XICS_MASKED;
1596                prio = state->saved_priority;
1597        }
1598        val |= prio << KVM_XICS_PRIORITY_SHIFT;
1599        if (state->lsi) {
1600                val |= KVM_XICS_LEVEL_SENSITIVE;
1601                if (state->saved_p)
1602                        val |= KVM_XICS_PENDING;
1603        } else {
1604                if (state->saved_p)
1605                        val |= KVM_XICS_PRESENTED;
1606
1607                if (state->saved_q)
1608                        val |= KVM_XICS_QUEUED;
1609
1610                /*
1611                 * We mark it pending (which will attempt a re-delivery)
1612                 * if we are in a queue *or* we were masked and had
1613                 * Q set, which is equivalent to the XICS "masked pending"
1614                 * state.
1615                 */
1616                if (state->in_queue || (prio == MASKED && state->saved_q))
1617                        val |= KVM_XICS_PENDING;
1618        }
1619
1620        /*
1621         * If that was the last interrupt saved, reset the
1622         * in_queue flags
1623         */
1624        if (xive->saved_src_count == xive->src_count)
1625                xive_post_save_scan(xive);
1626
1627        /* Copy the result to userspace */
1628        if (put_user(val, ubufp))
1629                return -EFAULT;
1630
1631        return 0;
1632}
1633
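    /*
     * Allocate and publish the source block covering "irq". All
     * entries start out masked; the smp_wmb() orders their
     * initialization before the block pointer becomes visible.
     */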
1634struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1635        struct kvmppc_xive *xive, int irq)
1636{
1637        struct kvmppc_xive_src_block *sb;
1638        int i, bid;
1639
1640        bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1641
1642        mutex_lock(&xive->lock);
1643
1644        /* block already exists - somebody else got here first */
1645        if (xive->src_blocks[bid])
1646                goto out;
1647
1648        /* Create the ICS */
1649        sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1650        if (!sb)
1651                goto out;
1652
1653        sb->id = bid;
1654
1655        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1656                sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1657                sb->irq_state[i].eisn = 0;
1658                sb->irq_state[i].guest_priority = MASKED;
1659                sb->irq_state[i].saved_priority = MASKED;
1660                sb->irq_state[i].act_priority = MASKED;
1661        }
1662        smp_wmb();
1663        xive->src_blocks[bid] = sb;
1664
1665        if (bid > xive->max_sbid)
1666                xive->max_sbid = bid;
1667
1668out:
1669        mutex_unlock(&xive->lock);
1670        return xive->src_blocks[bid];
1671}
1672
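    /*
     * Check whether "irq" is stashed as a delayed interrupt in one of
     * the vCPU ICPs, and clear it there if so. The caller then treats
     * the interrupt as pending.
     */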
1673static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1674{
1675        struct kvm *kvm = xive->kvm;
1676        struct kvm_vcpu *vcpu = NULL;
1677        int i;
1678
1679        kvm_for_each_vcpu(i, vcpu, kvm) {
1680                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1681
1682                if (!xc)
1683                        continue;
1684
1685                if (xc->delayed_irq == irq) {
1686                        xc->delayed_irq = 0;
1687                        xive->delayed_irqs--;
1688                        return true;
1689                }
1690        }
1691        return false;
1692}
1693
1694static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1695{
1696        struct kvmppc_xive_src_block *sb;
1697        struct kvmppc_xive_irq_state *state;
1698        u64 __user *ubufp = (u64 __user *) addr;
1699        u16 idx;
1700        u64 val;
1701        u8 act_prio, guest_prio;
1702        u32 server;
1703        int rc = 0;
1704
1705        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1706                return -ENOENT;
1707
1708        pr_devel("set_source(irq=0x%lx)\n", irq);
1709
1710        /* Find the source */
1711        sb = kvmppc_xive_find_source(xive, irq, &idx);
1712        if (!sb) {
1713                pr_devel("No source, creating source block...\n");
1714                sb = kvmppc_xive_create_src_block(xive, irq);
1715                if (!sb) {
1716                        pr_devel("Failed to create block...\n");
1717                        return -ENOMEM;
1718                }
1719        }
1720        state = &sb->irq_state[idx];
1721
1722        /* Read user passed data */
1723        if (get_user(val, ubufp)) {
1724                pr_devel("fault getting user info !\n");
1725                return -EFAULT;
1726        }
1727
1728        server = val & KVM_XICS_DESTINATION_MASK;
1729        guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1730
1731        pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
1732                 val, server, guest_prio);
1733
1734        /*
1735         * If the source doesn't already have an IPI, allocate
1736         * one and get the corresponding data
1737         */
1738        if (!state->ipi_number) {
1739                state->ipi_number = xive_native_alloc_irq();
1740                if (state->ipi_number == 0) {
1741                        pr_devel("Failed to allocate IPI !\n");
1742                        return -ENOMEM;
1743                }
1744                xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1745                pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1746        }
1747
1748        /*
1749         * We use lock_and_mask() to put the interrupt in the right masked
1750         * state. We will override that state from the saved state
1751         * further down, but this will handle the cases of interrupts
1752         * that need FW masking. We set the initial guest_priority to
1753         * 0 before calling it to ensure it actually performs the masking.
1754         */
1755        state->guest_priority = 0;
1756        xive_lock_and_mask(xive, sb, state);
1757
1758        /*
1759         * Now, we select a target if we have one. If we don't, we
1760         * leave the interrupt untargeted. It means that an interrupt
1761         * can become "untargeted" across migration if it was masked
1762         * by set_xive(), but there is little we can do about it.
1763         */
1764
1765        /* First convert prio and mark interrupt as untargeted */
1766        act_prio = xive_prio_from_guest(guest_prio);
1767        state->act_priority = MASKED;
1768
1769        /*
1770         * We need to drop the lock due to the mutex below. Hopefully
1771         * nothing is touching that interrupt since it hasn't been
1772         * advertised to a running guest yet.
1773         */
1774        arch_spin_unlock(&sb->lock);
1775
1776        /* If we have a priority target the interrupt */
1777        if (act_prio != MASKED) {
1778                /* First, check provisioning of queues */
1779                mutex_lock(&xive->lock);
1780                rc = xive_check_provisioning(xive->kvm, act_prio);
1781                mutex_unlock(&xive->lock);
1782
1783                /* Target interrupt */
1784                if (rc == 0)
1785                        rc = xive_target_interrupt(xive->kvm, state,
1786                                                   server, act_prio);
1787                /*
1788                 * If provisioning or targeting failed, leave it
1789                 * alone and masked. It will remain disabled until
1790                 * the guest re-targets it.
1791                 */
1792        }
1793
1794        /*
1795         * Find out if this was a delayed irq stashed in an ICP,
1796         * in which case, treat it as pending
1797         */
1798        if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1799                val |= KVM_XICS_PENDING;
1800                pr_devel("  Found delayed ! forcing PENDING !\n");
1801        }
1802
1803        /* Cleanup the SW state */
1804        state->old_p = false;
1805        state->old_q = false;
1806        state->lsi = false;
1807        state->asserted = false;
1808
1809        /* Restore LSI state */
1810        if (val & KVM_XICS_LEVEL_SENSITIVE) {
1811                state->lsi = true;
1812                if (val & KVM_XICS_PENDING)
1813                        state->asserted = true;
1814                pr_devel("  LSI ! Asserted=%d\n", state->asserted);
1815        }
1816
1817        /*
1818         * Restore P and Q. If the interrupt was pending, we
1819         * force Q and !P, which will trigger a resend.
1820         *
1821         * That means that a guest that had both an interrupt
1822         * pending (queued) and Q set will restore with only
1823         * one instance of that interrupt instead of 2, but that
1824         * is perfectly fine as coalescing interrupts that haven't
1825         * been presented yet is always allowed.
1826         */
1827        if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1828                state->old_p = true;
1829        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1830                state->old_q = true;
1831
1832        pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);
1833
1834        /*
1835         * If the interrupt was unmasked, update the guest priority,
1836         * perform the appropriate state transition and re-trigger
1837         * if necessary.
1838         */
1839        if (val & KVM_XICS_MASKED) {
1840                pr_devel("  masked, saving prio\n");
1841                state->guest_priority = MASKED;
1842                state->saved_priority = guest_prio;
1843        } else {
1844                pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
1845                xive_finish_unmask(xive, sb, state, guest_prio);
1846                state->saved_priority = guest_prio;
1847        }
1848
1849        /* Increment the number of valid sources and mark this one valid */
1850        if (!state->valid)
1851                xive->src_count++;
1852        state->valid = true;
1853
1854        return 0;
1855}
1856
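    /*
     * Interrupt injection entry point used by the generic irq routing
     * code (e.g. via irqfd): record the level for LSIs and trigger the
     * source's IPI to present the interrupt to the guest.
     */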
1857int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1858                        bool line_status)
1859{
1860        struct kvmppc_xive *xive = kvm->arch.xive;
1861        struct kvmppc_xive_src_block *sb;
1862        struct kvmppc_xive_irq_state *state;
1863        u16 idx;
1864
1865        if (!xive)
1866                return -ENODEV;
1867
1868        sb = kvmppc_xive_find_source(xive, irq, &idx);
1869        if (!sb)
1870                return -EINVAL;
1871
1872        /* Perform locklessly ... (we need to do some RCUisms here...) */
1873        state = &sb->irq_state[idx];
1874        if (!state->valid)
1875                return -EINVAL;
1876
1877        /* We don't allow a trigger on a passed-through interrupt */
1878        if (state->pt_number)
1879                return -EINVAL;
1880
1881        if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1882                state->asserted = true;
1883        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1884                state->asserted = false;
1885                return 0;
1886        }
1887
1888        /* Trigger the IPI */
1889        xive_irq_trigger(&state->ipi_data);
1890
1891        return 0;
1892}
1893
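    /*
     * Handle the KVM_DEV_XICS_GRP_CTRL / KVM_DEV_XICS_NR_SERVERS
     * attribute: bound the vCPU ids the VM may use. Userspace sets it
     * with KVM_SET_DEVICE_ATTR on the device fd, and it must be done
     * before the first vCPU connects, i.e. before the VP block is
     * allocated.
     */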
1894int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
1895{
1896        u32 __user *ubufp = (u32 __user *) addr;
1897        u32 nr_servers;
1898        int rc = 0;
1899
1900        if (get_user(nr_servers, ubufp))
1901                return -EFAULT;
1902
1903        pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
1904
1905        if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
1906                return -EINVAL;
1907
1908        mutex_lock(&xive->lock);
1909        if (xive->vp_base != XIVE_INVALID_VP)
1910                /* The VP block is allocated once and freed when the device
1911                 * is released. Better not to allow changing its size since it's
1912                 * used by connect_vcpu to validate that vCPU ids are valid (e.g.,
1913                 * setting it back to a higher value could allow connect_vcpu
1914                 * to come up with a VP id that goes beyond the VP block, which
1915                 * is likely to cause a crash in OPAL).
1916                 */
1917                rc = -EBUSY;
1918        else if (nr_servers > KVM_MAX_VCPUS)
1919                /* We don't need more servers. Higher vCPU ids get packed
1920                 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
1921                 */
1922                xive->nr_servers = KVM_MAX_VCPUS;
1923        else
1924                xive->nr_servers = nr_servers;
1925
1926        mutex_unlock(&xive->lock);
1927
1928        return rc;
1929}
1930
1931static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1932{
1933        struct kvmppc_xive *xive = dev->private;
1934
1935        /* We honor the existing XICS ioctl */
1936        switch (attr->group) {
1937        case KVM_DEV_XICS_GRP_SOURCES:
1938                return xive_set_source(xive, attr->attr, attr->addr);
1939        case KVM_DEV_XICS_GRP_CTRL:
1940                switch (attr->attr) {
1941                case KVM_DEV_XICS_NR_SERVERS:
1942                        return kvmppc_xive_set_nr_servers(xive, attr->addr);
1943                }
1944        }
1945        return -ENXIO;
1946}
1947
1948static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1949{
1950        struct kvmppc_xive *xive = dev->private;
1951
1952        /* We honor the existing XICS ioctl */
1953        switch (attr->group) {
1954        case KVM_DEV_XICS_GRP_SOURCES:
1955                return xive_get_source(xive, attr->attr, attr->addr);
1956        }
1957        return -ENXIO;
1958}
1959
1960static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1961{
1962        /* We honor the same limits as XICS, at least for now */
1963        switch (attr->group) {
1964        case KVM_DEV_XICS_GRP_SOURCES:
1965                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1966                    attr->attr < KVMPPC_XICS_NR_IRQS)
1967                        return 0;
1968                break;
1969        case KVM_DEV_XICS_GRP_CTRL:
1970                switch (attr->attr) {
1971                case KVM_DEV_XICS_NR_SERVERS:
1972                        return 0;
1973                }
1974        }
1975        return -ENXIO;
1976}
1977
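    /* Mask the interrupt (PQ = 01) and detach it from its target. */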
1978static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1979{
1980        xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1981        xive_native_configure_irq(hw_num, 0, MASKED, 0);
1982}
1983
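    /*
     * Tear down every valid source in a block: free the backing IPIs
     * and mask any passed-through interrupt (its HW data is kept, as
     * noted below).
     */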
1984void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1985{
1986        int i;
1987
1988        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1989                struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1990
1991                if (!state->valid)
1992                        continue;
1993
1994                kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1995                xive_cleanup_irq_data(&state->ipi_data);
1996                xive_native_free_irq(state->ipi_number);
1997
1998                /* Pass-through, cleanup too but keep IRQ hw data */
1999                if (state->pt_number)
2000                        kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
2001
2002                state->valid = false;
2003        }
2004}
2005
2006/*
2007 * Called when device fd is closed.  kvm->lock is held.
2008 */
2009static void kvmppc_xive_release(struct kvm_device *dev)
2010{
2011        struct kvmppc_xive *xive = dev->private;
2012        struct kvm *kvm = xive->kvm;
2013        struct kvm_vcpu *vcpu;
2014        int i;
2015
2016        pr_devel("Releasing xive device\n");
2017
2018        /*
2019         * Since this is the device release function, we know that
2020         * userspace does not have any open fd referring to the
2021         * device.  Therefore there can not be any of the device
2022         * attribute set/get functions being executed concurrently,
2023         * and similarly, the connect_vcpu and set/clr_mapped
2024         * functions also cannot be being executed.
2025         */
2026
2027        debugfs_remove(xive->dentry);
2028
2029        /*
2030         * We should clean up the vCPU interrupt presenters first.
2031         */
2032        kvm_for_each_vcpu(i, vcpu, kvm) {
2033                /*
2034                 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
2035                 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
2036                 * Holding the vcpu->mutex also means that the vcpu cannot
2037                 * be executing the KVM_RUN ioctl, and therefore it cannot
2038                 * be executing the XIVE push or pull code or accessing
2039                 * the XIVE MMIO regions.
2040                 */
2041                mutex_lock(&vcpu->mutex);
2042                kvmppc_xive_cleanup_vcpu(vcpu);
2043                mutex_unlock(&vcpu->mutex);
2044        }
2045
2046        /*
2047         * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
2048         * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
2049         * against xive code getting called during vcpu execution or
2050         * set/get one_reg operations.
2051         */
2052        kvm->arch.xive = NULL;
2053
2054        /* Mask and free interrupts */
2055        for (i = 0; i <= xive->max_sbid; i++) {
2056                if (xive->src_blocks[i])
2057                        kvmppc_xive_free_sources(xive->src_blocks[i]);
2058                kfree(xive->src_blocks[i]);
2059                xive->src_blocks[i] = NULL;
2060        }
2061
2062        if (xive->vp_base != XIVE_INVALID_VP)
2063                xive_native_free_vp_block(xive->vp_base);
2064
2065        /*
2066         * A reference to the kvmppc_xive pointer is now kept under
2067         * the xive_devices struct of the machine for reuse. For now,
2068         * it is only freed when the VM is destroyed, until we fix all
2069         * the execution paths.
2070         */
2071
2072        kfree(dev);
2073}
2074
2075/*
2076 * When the guest chooses the interrupt mode (XICS legacy or XIVE
2077 * native), the VM switches to a different KVM device. The previous
2078 * device will be "released" before the new one is created.
2079 *
2080 * Until we are sure all execution paths are well protected, provide a
2081 * fail-safe (transitional) method for device destruction, in which
2082 * the XIVE device pointer is recycled and not directly freed.
2083 */
2084struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
2085{
2086        struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
2087                &kvm->arch.xive_devices.native :
2088                &kvm->arch.xive_devices.xics_on_xive;
2089        struct kvmppc_xive *xive = *kvm_xive_device;
2090
2091        if (!xive) {
2092                xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2093                *kvm_xive_device = xive;
2094        } else {
2095                memset(xive, 0, sizeof(*xive));
2096        }
2097
2098        return xive;
2099}
2100
2101/*
2102 * Create a XICS device with XIVE backend.  kvm->lock is held.
2103 */
2104static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2105{
2106        struct kvmppc_xive *xive;
2107        struct kvm *kvm = dev->kvm;
2108
2109        pr_devel("Creating xive for partition\n");
2110
2111        /* Already there ? */
2112        if (kvm->arch.xive)
2113                return -EEXIST;
2114
2115        xive = kvmppc_xive_get_device(kvm, type);
2116        if (!xive)
2117                return -ENOMEM;
2118
2119        dev->private = xive;
2120        xive->dev = dev;
2121        xive->kvm = kvm;
2122        mutex_init(&xive->lock);
2123
2124        /* We use the default queue size set by the host */
2125        xive->q_order = xive_native_default_eq_shift();
2126        if (xive->q_order < PAGE_SHIFT)
2127                xive->q_page_order = 0;
2128        else
2129                xive->q_page_order = xive->q_order - PAGE_SHIFT;
2130
2131        /* VP allocation is delayed to the first call to connect_vcpu */
2132        xive->vp_base = XIVE_INVALID_VP;
2133        /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
2134         * on a POWER9 system.
2135         */
2136        xive->nr_servers = KVM_MAX_VCPUS;
2137
2138        xive->single_escalation = xive_native_has_single_escalation();
2139
2140        kvm->arch.xive = xive;
2141        return 0;
2142}
2143
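    /*
     * Dispatch a XICS hcall made by the guest to the corresponding
     * virtual mode xive_vm_* handler.
     */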
2144int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
2145{
2146        struct kvmppc_vcore *vc = vcpu->arch.vcore;
2147
2148        /* The VM should have configured XICS mode before doing XICS hcalls. */
2149        if (!kvmppc_xics_enabled(vcpu))
2150                return H_TOO_HARD;
2151
2152        switch (req) {
2153        case H_XIRR:
2154                return xive_vm_h_xirr(vcpu);
2155        case H_CPPR:
2156                return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
2157        case H_EOI:
2158                return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
2159        case H_IPI:
2160                return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
2161                                          kvmppc_get_gpr(vcpu, 5));
2162        case H_IPOLL:
2163                return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
2164        case H_XIRR_X:
2165                xive_vm_h_xirr(vcpu);
2166                kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
2167                return H_SUCCESS;
2168        }
2169
2170        return H_UNSUPPORTED;
2171}
2172EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
2173
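    /*
     * debugfs helper: dump the first two entries of each event queue
     * and the state of the escalation interrupts for one vCPU.
     */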
2174int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2175{
2176        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2177        unsigned int i;
2178
2179        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2180                struct xive_q *q = &xc->queues[i];
2181                u32 i0, i1, idx;
2182
2183                if (!q->qpage && !xc->esc_virq[i])
2184                        continue;
2185
2186                if (q->qpage) {
2187                        seq_printf(m, "    q[%d]: ", i);
2188                        idx = q->idx;
2189                        i0 = be32_to_cpup(q->qpage + idx);
2190                        idx = (idx + 1) & q->msk;
2191                        i1 = be32_to_cpup(q->qpage + idx);
2192                        seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2193                                   i0, i1);
2194                }
2195                if (xc->esc_virq[i]) {
2196                        struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2197                        struct xive_irq_data *xd =
2198                                irq_data_get_irq_handler_data(d);
2199                        u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2200
2201                        seq_printf(m, "    ESC %d %c%c EOI @%llx",
2202                                   xc->esc_virq[i],
2203                                   (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
2204                                   (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
2205                                   xd->eoi_page);
2206                        seq_puts(m, "\n");
2207                }
2208        }
2209        return 0;
2210}
2211
2212void kvmppc_xive_debug_show_sources(struct seq_file *m,
2213                                    struct kvmppc_xive_src_block *sb)
2214{
2215        int i;
2216
2217        seq_puts(m, "    LISN      HW/CHIP   TYPE    PQ      EISN    CPU/PRIO\n");
2218        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2219                struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
2220                struct xive_irq_data *xd;
2221                u64 pq;
2222                u32 hw_num;
2223
2224                if (!state->valid)
2225                        continue;
2226
2227                kvmppc_xive_select_irq(state, &hw_num, &xd);
2228
2229                pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2230
2231                seq_printf(m, "%08x  %08x/%02x", state->number, hw_num,
2232                           xd->src_chip);
2233                if (state->lsi)
2234                        seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
2235                else
2236                        seq_puts(m, "  MSI");
2237
2238                seq_printf(m, " %s  %c%c  %08x   % 4d/%d",
2239                           state->ipi_number == hw_num ? "IPI" : " PT",
2240                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
2241                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
2242                           state->eisn, state->act_server,
2243                           state->act_priority);
2244
2245                seq_puts(m, "\n");
2246        }
2247}
2248
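    /*
     * debugfs dump: per-vCPU interrupt presenter state, hcall counters
     * (real mode vs. virtual mode) and the source table.
     */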
2249static int xive_debug_show(struct seq_file *m, void *private)
2250{
2251        struct kvmppc_xive *xive = m->private;
2252        struct kvm *kvm = xive->kvm;
2253        struct kvm_vcpu *vcpu;
2254        u64 t_rm_h_xirr = 0;
2255        u64 t_rm_h_ipoll = 0;
2256        u64 t_rm_h_cppr = 0;
2257        u64 t_rm_h_eoi = 0;
2258        u64 t_rm_h_ipi = 0;
2259        u64 t_vm_h_xirr = 0;
2260        u64 t_vm_h_ipoll = 0;
2261        u64 t_vm_h_cppr = 0;
2262        u64 t_vm_h_eoi = 0;
2263        u64 t_vm_h_ipi = 0;
2264        unsigned int i;
2265
2266        if (!kvm)
2267                return 0;
2268
2269        seq_puts(m, "=========\nVCPU state\n=========\n");
2270
2271        kvm_for_each_vcpu(i, vcpu, kvm) {
2272                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2273
2274                if (!xc)
2275                        continue;
2276
2277                seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
2278                         "    CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2279                         xc->server_num, xc->vp_id, xc->vp_chip_id,
2280                         xc->cppr, xc->hw_cppr,
2281                         xc->mfrr, xc->pending,
2282                         xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2283
2284                kvmppc_xive_debug_show_queues(m, vcpu);
2285
2286                t_rm_h_xirr += xc->stat_rm_h_xirr;
2287                t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2288                t_rm_h_cppr += xc->stat_rm_h_cppr;
2289                t_rm_h_eoi += xc->stat_rm_h_eoi;
2290                t_rm_h_ipi += xc->stat_rm_h_ipi;
2291                t_vm_h_xirr += xc->stat_vm_h_xirr;
2292                t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2293                t_vm_h_cppr += xc->stat_vm_h_cppr;
2294                t_vm_h_eoi += xc->stat_vm_h_eoi;
2295                t_vm_h_ipi += xc->stat_vm_h_ipi;
2296        }
2297
2298        seq_puts(m, "Hcalls totals\n");
2299        seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2300        seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2301        seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2302        seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2303        seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2304
2305        seq_puts(m, "=========\nSources\n=========\n");
2306
2307        for (i = 0; i <= xive->max_sbid; i++) {
2308                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2309
2310                if (sb) {
2311                        arch_spin_lock(&sb->lock);
2312                        kvmppc_xive_debug_show_sources(m, sb);
2313                        arch_spin_unlock(&sb->lock);
2314                }
2315        }
2316
2317        return 0;
2318}
2319
2320DEFINE_SHOW_ATTRIBUTE(xive_debug);
2321
2322static void xive_debugfs_init(struct kvmppc_xive *xive)
2323{
2324        char *name;
2325
2326        name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2327        if (!name) {
2328                pr_err("%s: no memory for name\n", __func__);
2329                return;
2330        }
2331
2332        xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2333                                           xive, &xive_debug_fops);
2334
2335        pr_debug("%s: created %s\n", __func__, name);
2336        kfree(name);
2337}
2338
2339static void kvmppc_xive_init(struct kvm_device *dev)
2340{
2341        struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2342
2343        /* Register some debug interfaces */
2344        xive_debugfs_init(xive);
2345}
2346
2347struct kvm_device_ops kvm_xive_ops = {
2348        .name = "kvm-xive",
2349        .create = kvmppc_xive_create,
2350        .init = kvmppc_xive_init,
2351        .release = kvmppc_xive_release,
2352        .set_attr = xive_set_attr,
2353        .get_attr = xive_get_attr,
2354        .has_attr = xive_has_attr,
2355};
2356