linux/arch/powerpc/sysdev/xive/spapr.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

struct xive_irq_bitmap {
        unsigned long           *bitmap;
        unsigned int            base;
        unsigned int            count;
        spinlock_t              lock;
        struct list_head        list;
};

static LIST_HEAD(xive_irq_bitmaps);

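/*
 * Hardware interrupt numbers (LISNs) usable by the partition are
 * advertised in the "ibm,xive-lisn-ranges" device tree property.
 * Each range is wrapped in a small bitmap allocator so that numbers
 * can be handed out and returned, for instance when allocating IPIs
 * (see xive_spapr_get_ipi()).
 */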
static int xive_irq_bitmap_add(int base, int count)
{
        struct xive_irq_bitmap *xibm;

        xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
        if (!xibm)
                return -ENOMEM;

        spin_lock_init(&xibm->lock);
        xibm->base = base;
        xibm->count = count;
        xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
        if (!xibm->bitmap) {
                kfree(xibm);
                return -ENOMEM;
        }
        list_add(&xibm->list, &xive_irq_bitmaps);

        pr_info("Using IRQ range [%x-%x]\n", xibm->base,
                xibm->base + xibm->count - 1);
        return 0;
}

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
        int irq;

        irq = find_first_zero_bit(xibm->bitmap, xibm->count);
        if (irq != xibm->count) {
                set_bit(irq, xibm->bitmap);
                irq += xibm->base;
        } else {
                irq = -ENOMEM;
        }

        return irq;
}

static int xive_irq_bitmap_alloc(void)
{
        struct xive_irq_bitmap *xibm;
        unsigned long flags;
        int irq = -ENOENT;

        list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
                spin_lock_irqsave(&xibm->lock, flags);
                irq = __xive_irq_bitmap_alloc(xibm);
                spin_unlock_irqrestore(&xibm->lock, flags);
                if (irq >= 0)
                        break;
        }
        return irq;
}

static void xive_irq_bitmap_free(int irq)
{
        unsigned long flags;
        struct xive_irq_bitmap *xibm;

        list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
                if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
                        spin_lock_irqsave(&xibm->lock, flags);
                        clear_bit(irq - xibm->base, xibm->bitmap);
                        spin_unlock_irqrestore(&xibm->lock, flags);
                        break;
                }
        }
}


/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
        unsigned int ms = 0;

        if (H_IS_LONG_BUSY(rc)) {
                ms = get_longbusy_msecs(rc);
        } else if (rc == H_BUSY) {
                ms = 10; /* seems appropriate for XIVE hcalls */
        }

        return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
        unsigned int ms;

        ms = plpar_busy_delay_time(rc);
        if (ms)
                mdelay(ms);

        return ms;
}

/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
        long rc;

        do {
                rc = plpar_hcall_norets(H_INT_RESET, flags);
        } while (plpar_busy_delay(rc));

        if (rc)
                pr_err("H_INT_RESET failed %ld\n", rc);

        return rc;
}

static long plpar_int_get_source_info(unsigned long flags,
                                      unsigned long lisn,
                                      unsigned long *src_flags,
                                      unsigned long *eoi_page,
                                      unsigned long *trig_page,
                                      unsigned long *esb_shift)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        do {
                rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
                return rc;
        }

        *src_flags = retbuf[0];
        *eoi_page  = retbuf[1];
        *trig_page = retbuf[2];
        *esb_shift = retbuf[3];

        pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
                retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

        return 0;
}

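/*
 * The hcall flag definitions below use the PAPR convention of
 * numbering bits from the most significant end (bit 0 is the MSB),
 * hence the "1ull << (63 - n)" form.
 */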
#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
#define XIVE_SRC_MASK     (1ull << (63 - 63)) /* unused */

static long plpar_int_set_source_config(unsigned long flags,
                                        unsigned long lisn,
                                        unsigned long target,
                                        unsigned long prio,
                                        unsigned long sw_irq)
{
        long rc;


        pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
                flags, lisn, target, prio, sw_irq);


        do {
                rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
                                        target, prio, sw_irq);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
                       lisn, target, prio, rc);
                return rc;
        }

        return 0;
}

static long plpar_int_get_source_config(unsigned long flags,
                                        unsigned long lisn,
                                        unsigned long *target,
                                        unsigned long *prio,
                                        unsigned long *sw_irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);

        do {
                rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
                                 target, prio, sw_irq);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
                       lisn, rc);
                return rc;
        }

        *target = retbuf[0];
        *prio   = retbuf[1];
        *sw_irq = retbuf[2];

        pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
                retbuf[0], retbuf[1], retbuf[2]);

        return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
                                     unsigned long target,
                                     unsigned long priority,
                                     unsigned long *esn_page,
                                     unsigned long *esn_size)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        do {
                rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
                                 priority);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
                       target, priority, rc);
                return rc;
        }

        *esn_page = retbuf[0];
        *esn_size = retbuf[1];

        pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
                retbuf[0], retbuf[1]);

        return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
                                       unsigned long target,
                                       unsigned long priority,
                                       unsigned long qpage,
                                       unsigned long qsize)
{
        long rc;

        pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
                flags,  target, priority, qpage, qsize);

        do {
                rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
                                        priority, qpage, qsize);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
                       target, priority, qpage, rc);
                return  rc;
        }

        return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
        long rc;

        do {
                rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
                return  rc;
        }

        return 0;
}

#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
                          unsigned long lisn,
                          unsigned long offset,
                          unsigned long in_data,
                          unsigned long *out_data)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
                flags,  lisn, offset, in_data);

        do {
                rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
                                 in_data);
        } while (plpar_busy_delay(rc));

        if (rc) {
                pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
                       lisn, offset, rc);
                return  rc;
        }

        *out_data = retbuf[0];

        return 0;
}

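/*
 * ESB accesses performed through the H_INT_ESB hcall instead of
 * MMIO. This path is used for sources advertising the H_INT_ESB
 * flag, whose ESB pages are not mapped.
 */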
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
        unsigned long read_data;
        long rc;

        rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
                           lisn, offset, data, &read_data);
        if (rc)
                return -1;

        return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB     (1ull << (63 - 60))
#define XIVE_SRC_LSI           (1ull << (63 - 61))
#define XIVE_SRC_TRIGGER       (1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI     (1ull << (63 - 63))

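/*
 * Retrieve the characteristics of a source with H_INT_GET_SOURCE_INFO
 * and map its ESB pages, unless the hypervisor requires the H_INT_ESB
 * hcall to be used for interrupt management.
 */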
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
        long rc;
        unsigned long flags;
        unsigned long eoi_page;
        unsigned long trig_page;
        unsigned long esb_shift;

        memset(data, 0, sizeof(*data));

        rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
                                       &esb_shift);
        if (rc)
                return  -EINVAL;

        if (flags & XIVE_SRC_H_INT_ESB)
                data->flags  |= XIVE_IRQ_FLAG_H_INT_ESB;
        if (flags & XIVE_SRC_STORE_EOI)
                data->flags  |= XIVE_IRQ_FLAG_STORE_EOI;
        if (flags & XIVE_SRC_LSI)
                data->flags  |= XIVE_IRQ_FLAG_LSI;
        data->eoi_page  = eoi_page;
        data->esb_shift = esb_shift;
        data->trig_page = trig_page;

        data->hw_irq = hw_irq;

        /*
         * No chip-id for the sPAPR backend. This has an impact on how
         * we pick a target. See xive_pick_irq_target().
         */
        data->src_chip = XIVE_INVALID_CHIP_ID;

        /*
         * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
         * be used for interrupt management. Skip the remapping of the
         * ESB pages which are not available.
         */
        if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
                return 0;

        data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
        if (!data->eoi_mmio) {
                pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }

        /* Full function page supports trigger */
        if (flags & XIVE_SRC_TRIGGER) {
                data->trig_mmio = data->eoi_mmio;
                return 0;
        }

        data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
        if (!data->trig_mmio) {
                pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }
        return 0;
}

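/*
 * Route a hardware interrupt to a target and priority, and set the
 * Linux interrupt number as the EISN reported in the event queue.
 */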
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
        long rc;

        rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
                                         prio, sw_irq);

        return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
                                     u32 *sw_irq)
{
        long rc;
        unsigned long h_target;
        unsigned long h_prio;
        unsigned long h_sw_irq;

        rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
                                         &h_sw_irq);

        *target = h_target;
        *prio = h_prio;
        *sw_irq = h_sw_irq;

        return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
                                   __be32 *qpage, u32 order)
{
        s64 rc = 0;
        unsigned long esn_page;
        unsigned long esn_size;
        u64 flags, qpage_phys;

        /* If there's an actual queue page, clean it */
        if (order) {
                if (WARN_ON(!qpage))
                        return -EINVAL;
                qpage_phys = __pa(qpage);
        } else {
                qpage_phys = 0;
        }

        /* Initialize the rest of the fields */
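        /* Queue entries are 4 bytes each: a 2^order byte page holds 2^(order - 2) of them */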
        q->msk = order ? ((1u << (order - 2)) - 1) : 0;
        q->idx = 0;
        q->toggle = 0;

        rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
        if (rc) {
                pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
                       target, prio);
                rc = -EIO;
                goto fail;
        }

        /* TODO: add support for the notification page */
        q->eoi_phys = esn_page;

        /* Default is to always notify */
        flags = XIVE_EQ_ALWAYS_NOTIFY;

        /* Configure and enable the queue in HW */
        rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
        if (rc) {
                pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
                       target, prio);
                rc = -EIO;
        } else {
                q->qpage = qpage;
                if (is_secure_guest())
                        uv_share_page(PHYS_PFN(qpage_phys),
                                        1 << xive_alloc_order(order));
        }
fail:
        return rc;
}

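/*
 * Allocate the event queue page for a CPU/priority pair and hand it
 * over to the hypervisor.
 */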
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
                                  u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        __be32 *qpage;

        qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
        if (IS_ERR(qpage))
                return PTR_ERR(qpage);

        return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
                                          q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
                                  u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;
        long rc;
        int hw_cpu = get_hard_smp_processor_id(cpu);

        rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
        if (rc)
                pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
                       hw_cpu, prio);

        alloc_order = xive_alloc_order(xive_queue_shift);
        if (is_secure_guest())
                uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
        free_pages((unsigned long)q->qpage, alloc_order);
        q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
        /* Ignore cascaded controllers for the moment */
        return true;
}

#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        int irq = xive_irq_bitmap_alloc();

        if (irq < 0) {
                pr_err("Failed to allocate IPI on CPU %d\n", cpu);
                return -ENXIO;
        }

        xc->hw_ipi = irq;
        return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;

        xive_irq_bitmap_free(xc->hw_ipi);
        xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */

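/*
 * Return the interrupt controller to a clean state. H_INT_RESET has
 * partition wide scope (see plpar_int_reset() above).
 */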
static void xive_spapr_shutdown(void)
{
        plpar_int_reset(0);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
        u8 nsr, cppr;
        u16 ack;

        /*
         * Perform the "Acknowledge O/S to Register" cycle.
         *
         * Let's speed up the access to the TIMA using the raw I/O
         * accessor as we don't need the synchronisation routine of
         * the higher level ones
         */
        ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /*
         * Grab the CPPR and the "NSR" field which indicates the source
         * of the interrupt (if any)
         */
        cppr = ack & 0xff;
        nsr = ack >> 8;

        if (nsr & TM_QW1_NSR_EO) {
                if (cppr == 0xff)
                        return;
                /* Mark the priority pending */
                xc->pending_prio |= 1 << cppr;

                /*
                 * A new interrupt should never have a CPPR less favored
                 * than our current one.
                 */
                if (cppr >= xc->cppr)
                        pr_err("CPU %d odd ack CPPR, got %d at %d\n",
                               smp_processor_id(), cppr, xc->cppr);

                /* Update our idea of what the CPPR is */
                xc->cppr = cppr;
        }
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        /* Only some debug on the TIMA settings */
        pr_debug("(HW value: %08x %08x %08x)\n",
                 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
                 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
                 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        /* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
        /* Specs are unclear on what this is doing */
        plpar_int_sync(0, hw_irq);
}

static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
        struct xive_irq_bitmap *xibm;
        char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
                memset(buf, 0, PAGE_SIZE);
                bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
                seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
        }
        kfree(buf);

        return 0;
}

static const struct xive_ops xive_spapr_ops = {
        .populate_irq_data      = xive_spapr_populate_irq_data,
        .configure_irq          = xive_spapr_configure_irq,
        .get_irq_config         = xive_spapr_get_irq_config,
        .setup_queue            = xive_spapr_setup_queue,
        .cleanup_queue          = xive_spapr_cleanup_queue,
        .match                  = xive_spapr_match,
        .shutdown               = xive_spapr_shutdown,
        .update_pending         = xive_spapr_update_pending,
        .setup_cpu              = xive_spapr_setup_cpu,
        .teardown_cpu           = xive_spapr_teardown_cpu,
        .sync_source            = xive_spapr_sync_source,
        .esb_rw                 = xive_spapr_esb_rw,
#ifdef CONFIG_SMP
        .get_ipi                = xive_spapr_get_ipi,
        .put_ipi                = xive_spapr_put_ipi,
        .debug_show             = xive_spapr_debug_show,
#endif /* CONFIG_SMP */
        .name                   = "spapr",
};

/*
 * get max priority from "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
        struct device_node *rootdn;
        const __be32 *reg;
        u32 len;
        int prio, found;

        rootdn = of_find_node_by_path("/");
        if (!rootdn) {
                pr_err("root node not found!\n");
                return false;
        }

        reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
        if (!reg) {
                pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
                return false;
        }

        if (len % (2 * sizeof(u32)) != 0) {
                pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
                return false;
        }

        /* HW supports priorities in the range [0-7] and 0xFF is a
         * wildcard priority used to mask. We scan the ranges reserved
         * by the hypervisor to find the lowest priority we can use.
         */
        found = 0xFF;
        for (prio = 0; prio < 8; prio++) {
                int reserved = 0;
                int i;

                for (i = 0; i < len / (2 * sizeof(u32)); i++) {
                        int base  = be32_to_cpu(reg[2 * i]);
                        int range = be32_to_cpu(reg[2 * i + 1]);

                        if (prio >= base && prio < base + range)
                                reserved++;
                }

                if (!reserved)
                        found = prio;
        }

        if (found == 0xFF) {
                pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
                return false;
        }

        *max_prio = found;
        return true;
}

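/*
 * Fetch a byte of the "ibm,architecture-vec-5" property, i.e. the
 * option vector 5 bits negotiated with the hypervisor at CAS time,
 * from the flattened device tree.
 */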
static const u8 *get_vec5_feature(unsigned int index)
{
        unsigned long root, chosen;
        int size;
        const u8 *vec5;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND)
                return NULL;

        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
        if (!vec5)
                return NULL;

        if (size <= index)
                return NULL;

        return vec5 + index;
}

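/*
 * Decide whether XIVE exploitation mode should be used. "xive=off"
 * on the command line is honoured unless the hypervisor only
 * supports the XIVE interface.
 */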
static bool __init xive_spapr_disabled(void)
{
        const u8 *vec5_xive;

        vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
        if (vec5_xive) {
                u8 val;

                val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
                switch (val) {
                case OV5_FEAT(OV5_XIVE_EITHER):
                case OV5_FEAT(OV5_XIVE_LEGACY):
                        break;
                case OV5_FEAT(OV5_XIVE_EXPLOIT):
                        /* Hypervisor only supports XIVE */
                        if (xive_cmdline_disabled)
                                pr_warn("WARNING: Ignoring cmdline option xive=off\n");
                        return false;
                default:
                        pr_warn("%s: Unknown xive support option: 0x%x\n",
                                __func__, val);
                        break;
                }
        }

        return xive_cmdline_disabled;
}

bool __init xive_spapr_init(void)
{
        struct device_node *np;
        struct resource r;
        void __iomem *tima;
        struct property *prop;
        u8 max_prio;
        u32 val;
        u32 len;
        const __be32 *reg;
        int i;

        if (xive_spapr_disabled())
                return false;

        pr_devel("%s()\n", __func__);
        np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
        if (!np) {
                pr_devel("not found!\n");
                return false;
        }
        pr_devel("Found %s\n", np->full_name);

        /* Resource 1 is the OS ring TIMA */
        if (of_address_to_resource(np, 1, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }
        tima = ioremap(r.start, resource_size(&r));
        if (!tima) {
                pr_err("Failed to map thread mgmnt area\n");
                return false;
        }

        if (!xive_get_max_prio(&max_prio))
                return false;

        /* Feed the IRQ number allocator with the ranges given in the DT */
        reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
        if (!reg) {
                pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
                return false;
        }

        if (len % (2 * sizeof(u32)) != 0) {
                pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
                return false;
        }

        for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
                xive_irq_bitmap_add(be32_to_cpu(reg[0]),
                                    be32_to_cpu(reg[1]));

        /* Iterate the EQ sizes and pick one */
        of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
                xive_queue_shift = val;
                if (val == PAGE_SHIFT)
                        break;
        }

        /* Initialize XIVE core with our backend */
        if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
                return false;

        pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
        return true;
}

machine_arch_initcall(pseries, xive_core_debug_init);