linux/arch/powerpc/sysdev/xive/native.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"


static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
        __be64 flags, eoi_page, trig_page;
        __be32 esb_shift, src_chip;
        u64 opal_flags;
        s64 rc;

        memset(data, 0, sizeof(*data));

        rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
                                    &esb_shift, &src_chip);
        if (rc) {
                pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
                       hw_irq, rc);
                return -EINVAL;
        }

        opal_flags = be64_to_cpu(flags);
        if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
                data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
        if (opal_flags & OPAL_XIVE_IRQ_LSI)
                data->flags |= XIVE_IRQ_FLAG_LSI;
        if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
                data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
        if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
                data->flags |= XIVE_IRQ_FLAG_MASK_FW;
        if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
                data->flags |= XIVE_IRQ_FLAG_EOI_FW;
        data->eoi_page = be64_to_cpu(eoi_page);
        data->trig_page = be64_to_cpu(trig_page);
        data->esb_shift = be32_to_cpu(esb_shift);
        data->src_chip = be32_to_cpu(src_chip);

        data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
        if (!data->eoi_mmio) {
                pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }

        data->hw_irq = hw_irq;

        if (!data->trig_page)
                return 0;
        if (data->trig_page == data->eoi_page) {
                data->trig_mmio = data->eoi_mmio;
                return 0;
        }

        data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
        if (!data->trig_mmio) {
                pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
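
/*
 * Usage sketch (an illustration, not a caller taken from the tree): the
 * irq domain code that owns "hw_irq" would typically populate the data
 * and then route the source, e.g.:
 *
 *      struct xive_irq_data *xd = kzalloc(sizeof(*xd), GFP_KERNEL);
 *
 *      if (xd && !xive_native_populate_irq_data(hw_irq, xd))
 *              xive_native_configure_irq(hw_irq, target, prio, virq);
 *
 * hw_irq, target, prio and virq are assumed to be provided by the caller.
 */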

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);
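
/*
 * Note on the retry loops used throughout this file: OPAL calls that come
 * back with OPAL_BUSY are simply retried after an OPAL_BUSY_DELAY_MS sleep,
 * so these helpers are meant to be called from a context that can sleep.
 */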

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
                                      u32 *sw_irq)
{
        s64 rc;
        __be64 vp;
        __be32 lirq;

        rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

        *target = be64_to_cpu(vp);
        *sw_irq = be32_to_cpu(lirq);

        return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
                                __be32 *qpage, u32 order, bool can_escalate)
{
        s64 rc = 0;
        __be64 qeoi_page_be;
        __be32 esc_irq_be;
        u64 flags, qpage_phys;

        /* If there's an actual queue page, clean it */
        if (order) {
                if (WARN_ON(!qpage))
                        return -EINVAL;
                qpage_phys = __pa(qpage);
        } else
                qpage_phys = 0;

        /* Initialize the rest of the fields */
        q->msk = order ? ((1u << (order - 2)) - 1) : 0;
        q->idx = 0;
        q->toggle = 0;

        rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
                                      &qeoi_page_be,
                                      &esc_irq_be,
                                      NULL);
        if (rc) {
                pr_err("Error %lld getting queue info prio %d\n", rc, prio);
                rc = -EIO;
                goto fail;
        }
        q->eoi_phys = be64_to_cpu(qeoi_page_be);

        /* Default flags */
        flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

        /* Escalation needed ? */
        if (can_escalate) {
                q->esc_irq = be32_to_cpu(esc_irq_be);
                flags |= OPAL_XIVE_EQ_ESCALATE;
        }

        /* Configure and enable the queue in HW */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                pr_err("Error %lld setting queue for prio %d\n", rc, prio);
                rc = -EIO;
        } else {
                /*
                 * KVM code requires all of the above to be visible before
                 * q->qpage is set due to how it manages IPI EOIs
                 */
                wmb();
                q->qpage = qpage;
        }
fail:
        return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);
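
/*
 * Usage sketch (an assumption, not a caller from this file): the queue page
 * is provided by the caller and the queue is torn down with
 * xive_native_disable_queue() when no longer needed:
 *
 *      __be32 *qpage = (__be32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *                                                 xive_alloc_order(shift));
 *
 *      if (qpage && !xive_native_configure_queue(vp_id, q, prio, qpage,
 *                                                shift, can_escalate))
 *              ...;
 *      xive_native_disable_queue(vp_id, q, prio);
 *
 * vp_id, q, prio, shift and can_escalate are the caller's.
 */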

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
        s64 rc;

        /* Disable the queue in HW */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc)
                pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
        __xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        __be32 *qpage;

        qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
        if (IS_ERR(qpage))
                return PTR_ERR(qpage);

        return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
                                           q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;

        /*
         * We use the variant with no iounmap as this is called on exec
         * from an IPI and iounmap isn't safe
         */
        __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
        alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
        q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
        return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
        s64 irq = opal_xive_allocate_irq_raw(chip_id);

        /*
         * Old versions of skiboot can incorrectly return 0xffffffff to
         * indicate no space, fix it up here.
         */
        return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 irq;

        /* Allocate an IPI and populate info about it */
        for (;;) {
                irq = opal_xive_allocate_irq(xc->chip_id);
                if (irq == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                if (irq < 0) {
                        pr_err("Failed to allocate IPI on CPU %d\n", cpu);
                        return -ENXIO;
                }
                xc->hw_ipi = irq;
                break;
        }
        return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq(void)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc < 0)
                return 0;
        return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
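
/*
 * Sketch of the expected pairing (illustrative only):
 *
 *      u32 hw_irq = xive_native_alloc_irq();
 *
 *      if (hw_irq) {
 *              xive_native_configure_irq(hw_irq, target, prio, sw_irq);
 *              ...
 *              xive_native_free_irq(hw_irq);
 *      }
 *
 * A return value of 0 means the allocation failed; target, prio and sw_irq
 * are assumed to come from the caller.
 */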

void xive_native_free_irq(u32 irq)
{
        for (;;) {
                s64 rc = opal_xive_free_irq(irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;

        /* Free the IPI */
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
        for (;;) {
                rc = opal_xive_free_irq(xc->hw_ipi);
                if (rc == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                xc->hw_ipi = XIVE_BAD_IRQ;
                break;
        }
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
        /* Switch the XIVE to emulation mode */
        opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
        u8 he, cppr;
        u16 ack;

        /* Perform the hypervisor interrupt acknowledge cycle on the TIMA */
        ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /*
         * Grab the CPPR and the "HE" field which indicates the source
         * of the hypervisor interrupt (if any)
         */
        cppr = ack & 0xff;
        he = (ack >> 8) >> 6;
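        /*
         * The ACK value read above is laid out as (NSR << 8) | CPPR, and
         * the "HE" field lives in the top two bits of the NSR byte, hence
         * the double shift.
         */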
        switch(he) {
        case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
                break;
        case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
                if (cppr == 0xff)
                        return;
                /* Mark the priority pending */
                xc->pending_prio |= 1 << cppr;

                /*
                 * A new interrupt should never have a CPPR less favored
                 * than our current one.
                 */
                if (cppr >= xc->cppr)
                        pr_err("CPU %d odd ack CPPR, got %d at %d\n",
                               smp_processor_id(), cppr, xc->cppr);

                /* Update our idea of what the CPPR is */
                xc->cppr = cppr;
                break;
        case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
        case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
                pr_err("CPU %d got unexpected interrupt type HE=%d\n",
                       smp_processor_id(), he);
                return;
        }
}

static void xive_native_eoi(u32 hw_irq)
{
        /*
         * Not normally used except if specific interrupts need
         * a workaround on EOI.
         */
        opal_int_eoi(hw_irq);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;
        u32 vp;
        __be64 vp_cam_be;
        u64 vp_cam;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Check if pool VP already active, if it is, pull it */
        if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
                in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Enable the pool VP */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                pr_err("Failed to enable pool VP on CPU %d\n", cpu);
                return;
        }

        /* Grab its CAM value */
        rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
        if (rc) {
                pr_err("Failed to get pool VP info CPU %d\n", cpu);
                return;
        }
        vp_cam = be64_to_cpu(vp_cam_be);

        /* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;
        u32 vp;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Pull the pool VP from the CPU */
        in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Disable it */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}

void xive_native_sync_source(u32 hw_irq)
{
        opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
        opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
        .populate_irq_data      = xive_native_populate_irq_data,
        .configure_irq          = xive_native_configure_irq,
        .get_irq_config         = xive_native_get_irq_config,
        .setup_queue            = xive_native_setup_queue,
        .cleanup_queue          = xive_native_cleanup_queue,
        .match                  = xive_native_match,
        .shutdown               = xive_native_shutdown,
        .update_pending         = xive_native_update_pending,
        .eoi                    = xive_native_eoi,
        .setup_cpu              = xive_native_setup_cpu,
        .teardown_cpu           = xive_native_teardown_cpu,
        .sync_source            = xive_native_sync_source,
#ifdef CONFIG_SMP
        .get_ipi                = xive_native_get_ipi,
        .put_ipi                = xive_native_put_ipi,
#endif /* CONFIG_SMP */
        .name                   = "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
        int rc;

        if (of_property_read_u32(np, "ibm,xive-provision-page-size",
                                 &xive_provision_size) < 0)
                return true;
        rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
        if (rc < 0) {
                pr_err("Error %d getting provision chips array\n", rc);
                return false;
        }
        xive_provision_chip_count = rc;
        if (rc == 0)
                return true;

        xive_provision_chips = kcalloc(4, xive_provision_chip_count,
                                       GFP_KERNEL);
        if (WARN_ON(!xive_provision_chips))
                return false;

        rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
                                        xive_provision_chips,
                                        xive_provision_chip_count);
        if (rc < 0) {
                pr_err("Error %d reading provision chips array\n", rc);
                return false;
        }

        xive_provision_cache = kmem_cache_create("xive-provision",
                                                 xive_provision_size,
                                                 xive_provision_size,
                                                 0, NULL);
        if (!xive_provision_cache) {
                pr_err("Failed to allocate provision cache\n");
                return false;
        }
        return true;
}

static void xive_native_setup_pools(void)
{
        /* Allocate a pool big enough */
        pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

        xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
        if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
                pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

        pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
                 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
        return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
        struct device_node *np;
        struct resource r;
        void __iomem *tima;
        struct property *prop;
        u8 max_prio = 7;
        const __be32 *p;
        u32 val, cpu;
        s64 rc;

        if (xive_cmdline_disabled)
                return false;

        pr_devel("xive_native_init()\n");
        np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
        if (!np) {
                pr_devel("not found !\n");
                return false;
        }
        pr_devel("Found %pOF\n", np);

        /* Resource 1 is HV window */
        if (of_address_to_resource(np, 1, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }
        tima = ioremap(r.start, resource_size(&r));
        if (!tima) {
                pr_err("Failed to map thread mgmnt area\n");
                return false;
        }

        /* Read number of priorities */
        if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
                max_prio = val - 1;

        /* Iterate the EQ sizes and pick one */
        of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
                xive_queue_shift = val;
                if (val == PAGE_SHIFT)
                        break;
        }

        /* Do we support single escalation */
        if (of_get_property(np, "single-escalation-support", NULL) != NULL)
                xive_has_single_esc = true;

        /* Configure Thread Management areas for KVM */
        for_each_possible_cpu(cpu)
                kvmppc_set_xive_tima(cpu, r.start, tima);

        /* Resource 2 is OS window */
        if (of_address_to_resource(np, 2, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }

        xive_tima_os = r.start;

        /* Grab size of provisioning pages */
        xive_parse_provisioning(np);

        /* Switch the XIVE to exploitation mode */
        rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
        if (rc) {
                pr_err("Switch to exploitation mode failed with error %lld\n", rc);
                return false;
        }

        /* Setup some dummy HV pool VPs */
        xive_native_setup_pools();

        /* Initialize XIVE core with our backend */
        if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
                            max_prio)) {
                opal_xive_reset(OPAL_XIVE_MODE_EMU);
                return false;
        }
        pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
        return true;
}

static bool xive_native_provision_pages(void)
{
        u32 i;
        void *p;

        for (i = 0; i < xive_provision_chip_count; i++) {
                u32 chip = xive_provision_chips[i];

                /*
                 * XXX TODO: Try to make the allocation local to the node where
                 * the chip resides.
                 */
                p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
                if (!p) {
                        pr_err("Failed to allocate provisioning page\n");
                        return false;
                }
                opal_xive_donate_page(chip, __pa(p));
        }
        return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
        s64 rc;
        u32 order;

        order = fls(max_vcpus) - 1;
        if (max_vcpus > (1 << order))
                order++;
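        /*
         * Worked example (hypothetical numbers): max_vcpus = 48 gives
         * fls(48) - 1 = 5, and since 48 > (1 << 5) the order is bumped
         * to 6, i.e. a block of 64 VPs is requested.
         */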

        pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
                 max_vcpus, order);

        for (;;) {
                rc = opal_xive_alloc_vp_block(order);
                switch (rc) {
                case OPAL_BUSY:
                        msleep(OPAL_BUSY_DELAY_MS);
                        break;
                case OPAL_XIVE_PROVISIONING:
                        if (!xive_native_provision_pages())
                                return XIVE_INVALID_VP;
                        break;
                default:
                        if (rc < 0) {
                                pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
                                       order, rc);
                                return XIVE_INVALID_VP;
                        }
                        return rc;
                }
        }
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
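
/*
 * Typical VP block lifecycle, sketched under the assumption of a KVM-like
 * user (vp_base + i addresses one vCPU's VP; nr_vcpus and i are the
 * caller's):
 *
 *      u32 vp_base = xive_native_alloc_vp_block(nr_vcpus);
 *
 *      if (vp_base != XIVE_INVALID_VP) {
 *              xive_native_enable_vp(vp_base + i,
 *                                    xive_native_has_single_escalation());
 *              ...
 *              xive_native_disable_vp(vp_base + i);
 *              xive_native_free_vp_block(vp_base);
 *      }
 */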

void xive_native_free_vp_block(u32 vp_base)
{
        s64 rc;

        if (vp_base == XIVE_INVALID_VP)
                return;

        rc = opal_xive_free_vp_block(vp_base);
        if (rc < 0)
                pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
        s64 rc;
        u64 flags = OPAL_XIVE_VP_ENABLED;

        if (single_escalation)
                flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, flags, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
        __be64 vp_cam_be;
        __be32 vp_chip_id_be;
        s64 rc;

        rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
        if (rc)
                return -EIO;
        *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
        *out_chip_id = be32_to_cpu(vp_chip_id_be);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
        return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
                               u64 *out_qpage,
                               u64 *out_qsize,
                               u64 *out_qeoi_page,
                               u32 *out_escalate_irq,
                               u64 *out_qflags)
{
        __be64 qpage;
        __be64 qsize;
        __be64 qeoi_page;
        __be32 escalate_irq;
        __be64 qflags;
        s64 rc;

        rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
                                      &qeoi_page, &escalate_irq, &qflags);
        if (rc) {
                pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
                       vp_id, prio, rc);
                return -EIO;
        }

        if (out_qpage)
                *out_qpage = be64_to_cpu(qpage);
        if (out_qsize)
                *out_qsize = be32_to_cpu(qsize);
        if (out_qeoi_page)
                *out_qeoi_page = be64_to_cpu(qeoi_page);
        if (out_escalate_irq)
                *out_escalate_irq = be32_to_cpu(escalate_irq);
        if (out_qflags)
                *out_qflags = be64_to_cpu(qflags);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
        __be32 opal_qtoggle;
        __be32 opal_qindex;
        s64 rc;

        rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
                                       &opal_qindex);
        if (rc) {
                pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
                       vp_id, prio, rc);
                return -EIO;
        }

        if (qtoggle)
                *qtoggle = be32_to_cpu(opal_qtoggle);
        if (qindex)
                *qindex = be32_to_cpu(opal_qindex);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
        s64 rc;

        rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
        if (rc) {
                pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
                       vp_id, prio, rc);
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
        return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
                opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
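
/*
 * Sketch (assumed migration-style user): saving and restoring a queue's
 * toggle/index only makes sense when the firmware advertises the get/set
 * state calls:
 *
 *      if (xive_native_has_queue_state_support()) {
 *              xive_native_get_queue_state(vp_id, prio, &qtoggle, &qindex);
 *              ...
 *              xive_native_set_queue_state(vp_id, prio, qtoggle, qindex);
 *      }
 */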

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
        __be64 state;
        s64 rc;

        rc = opal_xive_get_vp_state(vp_id, &state);
        if (rc) {
                pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
                       vp_id, rc);
                return -EIO;
        }

        if (out_state)
                *out_state = be64_to_cpu(state);
        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);