linux/arch/powerpc/sysdev/mpic.c
   1/*
   2 *  arch/powerpc/sysdev/mpic.c
   3 *
   4 *  Driver for interrupt controllers following the OpenPIC standard, the
   5 *  common implementation being IBM's MPIC. This driver can also deal
   6 *  with various broken implementations of this HW.
   7 *
   8 *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
   9 *  Copyright 2010-2012 Freescale Semiconductor, Inc.
  10 *
  11 *  This file is subject to the terms and conditions of the GNU General Public
  12 *  License.  See the file COPYING in the main directory of this archive
  13 *  for more details.
  14 */
  15
  16#undef DEBUG
  17#undef DEBUG_IPI
  18#undef DEBUG_IRQ
  19#undef DEBUG_LOW
  20
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/init.h>
  24#include <linux/irq.h>
  25#include <linux/smp.h>
  26#include <linux/interrupt.h>
  27#include <linux/bootmem.h>
  28#include <linux/spinlock.h>
  29#include <linux/pci.h>
  30#include <linux/slab.h>
  31#include <linux/syscore_ops.h>
  32#include <linux/ratelimit.h>
  33
  34#include <asm/ptrace.h>
  35#include <asm/signal.h>
  36#include <asm/io.h>
  37#include <asm/pgtable.h>
  38#include <asm/irq.h>
  39#include <asm/machdep.h>
  40#include <asm/mpic.h>
  41#include <asm/smp.h>
  42
  43#include "mpic.h"
  44
  45#ifdef DEBUG
  46#define DBG(fmt...) printk(fmt)
  47#else
  48#define DBG(fmt...)
  49#endif
  50
  51static struct mpic *mpics;
  52static struct mpic *mpic_primary;
  53static DEFINE_RAW_SPINLOCK(mpic_lock);
  54
  55#ifdef CONFIG_PPC32     /* XXX for now */
  56#ifdef CONFIG_IRQ_ALL_CPUS
  57#define distribute_irqs (1)
  58#else
  59#define distribute_irqs (0)
  60#endif
  61#endif
  62
  63#ifdef CONFIG_MPIC_WEIRD
  64static u32 mpic_infos[][MPIC_IDX_END] = {
  65        [0] = { /* Original OpenPIC compatible MPIC */
  66                MPIC_GREG_BASE,
  67                MPIC_GREG_FEATURE_0,
  68                MPIC_GREG_GLOBAL_CONF_0,
  69                MPIC_GREG_VENDOR_ID,
  70                MPIC_GREG_IPI_VECTOR_PRI_0,
  71                MPIC_GREG_IPI_STRIDE,
  72                MPIC_GREG_SPURIOUS,
  73                MPIC_GREG_TIMER_FREQ,
  74
  75                MPIC_TIMER_BASE,
  76                MPIC_TIMER_STRIDE,
  77                MPIC_TIMER_CURRENT_CNT,
  78                MPIC_TIMER_BASE_CNT,
  79                MPIC_TIMER_VECTOR_PRI,
  80                MPIC_TIMER_DESTINATION,
  81
  82                MPIC_CPU_BASE,
  83                MPIC_CPU_STRIDE,
  84                MPIC_CPU_IPI_DISPATCH_0,
  85                MPIC_CPU_IPI_DISPATCH_STRIDE,
  86                MPIC_CPU_CURRENT_TASK_PRI,
  87                MPIC_CPU_WHOAMI,
  88                MPIC_CPU_INTACK,
  89                MPIC_CPU_EOI,
  90                MPIC_CPU_MCACK,
  91
  92                MPIC_IRQ_BASE,
  93                MPIC_IRQ_STRIDE,
  94                MPIC_IRQ_VECTOR_PRI,
  95                MPIC_VECPRI_VECTOR_MASK,
  96                MPIC_VECPRI_POLARITY_POSITIVE,
  97                MPIC_VECPRI_POLARITY_NEGATIVE,
  98                MPIC_VECPRI_SENSE_LEVEL,
  99                MPIC_VECPRI_SENSE_EDGE,
 100                MPIC_VECPRI_POLARITY_MASK,
 101                MPIC_VECPRI_SENSE_MASK,
 102                MPIC_IRQ_DESTINATION
 103        },
 104        [1] = { /* Tsi108/109 PIC */
 105                TSI108_GREG_BASE,
 106                TSI108_GREG_FEATURE_0,
 107                TSI108_GREG_GLOBAL_CONF_0,
 108                TSI108_GREG_VENDOR_ID,
 109                TSI108_GREG_IPI_VECTOR_PRI_0,
 110                TSI108_GREG_IPI_STRIDE,
 111                TSI108_GREG_SPURIOUS,
 112                TSI108_GREG_TIMER_FREQ,
 113
 114                TSI108_TIMER_BASE,
 115                TSI108_TIMER_STRIDE,
 116                TSI108_TIMER_CURRENT_CNT,
 117                TSI108_TIMER_BASE_CNT,
 118                TSI108_TIMER_VECTOR_PRI,
 119                TSI108_TIMER_DESTINATION,
 120
 121                TSI108_CPU_BASE,
 122                TSI108_CPU_STRIDE,
 123                TSI108_CPU_IPI_DISPATCH_0,
 124                TSI108_CPU_IPI_DISPATCH_STRIDE,
 125                TSI108_CPU_CURRENT_TASK_PRI,
 126                TSI108_CPU_WHOAMI,
 127                TSI108_CPU_INTACK,
 128                TSI108_CPU_EOI,
 129                TSI108_CPU_MCACK,
 130
 131                TSI108_IRQ_BASE,
 132                TSI108_IRQ_STRIDE,
 133                TSI108_IRQ_VECTOR_PRI,
 134                TSI108_VECPRI_VECTOR_MASK,
 135                TSI108_VECPRI_POLARITY_POSITIVE,
 136                TSI108_VECPRI_POLARITY_NEGATIVE,
 137                TSI108_VECPRI_SENSE_LEVEL,
 138                TSI108_VECPRI_SENSE_EDGE,
 139                TSI108_VECPRI_POLARITY_MASK,
 140                TSI108_VECPRI_SENSE_MASK,
 141                TSI108_IRQ_DESTINATION
 142        },
 143};
 144
 145#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]
 146
 147#else /* CONFIG_MPIC_WEIRD */
 148
 149#define MPIC_INFO(name) MPIC_##name
 150
 151#endif /* CONFIG_MPIC_WEIRD */
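
/*
 * MPIC_INFO(name) hides the two register layouts: with CONFIG_MPIC_WEIRD it
 * indexes the per-variant offset table selected at runtime (mpic->hw_set),
 * otherwise it collapses at compile time to the plain MPIC_<name> constant.
 * E.g. MPIC_INFO(GREG_BASE) is either mpic->hw_set[MPIC_IDX_GREG_BASE] or
 * MPIC_GREG_BASE.
 */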
 152
 153static inline unsigned int mpic_processor_id(struct mpic *mpic)
 154{
 155        unsigned int cpu = 0;
 156
 157        if (!(mpic->flags & MPIC_SECONDARY))
 158                cpu = hard_smp_processor_id();
 159
 160        return cpu;
 161}
 162
 163/*
 164 * Register accessor functions
 165 */
 166
 167
 168static inline u32 _mpic_read(enum mpic_reg_type type,
 169                             struct mpic_reg_bank *rb,
 170                             unsigned int reg)
 171{
 172        switch(type) {
 173#ifdef CONFIG_PPC_DCR
 174        case mpic_access_dcr:
 175                return dcr_read(rb->dhost, reg);
 176#endif
 177        case mpic_access_mmio_be:
 178                return in_be32(rb->base + (reg >> 2));
 179        case mpic_access_mmio_le:
 180        default:
 181                return in_le32(rb->base + (reg >> 2));
 182        }
 183}
 184
 185static inline void _mpic_write(enum mpic_reg_type type,
 186                               struct mpic_reg_bank *rb,
 187                               unsigned int reg, u32 value)
 188{
 189        switch(type) {
 190#ifdef CONFIG_PPC_DCR
 191        case mpic_access_dcr:
 192                dcr_write(rb->dhost, reg, value);
 193                break;
 194#endif
 195        case mpic_access_mmio_be:
 196                out_be32(rb->base + (reg >> 2), value);
 197                break;
 198        case mpic_access_mmio_le:
 199        default:
 200                out_le32(rb->base + (reg >> 2), value);
 201                break;
 202        }
 203}
 204
 205static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
 206{
 207        enum mpic_reg_type type = mpic->reg_type;
 208        unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
 209                              (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 210
 211        if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
 212                type = mpic_access_mmio_be;
 213        return _mpic_read(type, &mpic->gregs, offset);
 214}
 215
 216static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
 217{
 218        unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
 219                              (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 220
 221        _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
 222}
 223
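/*
 * Timers come in groups of four; timers 4-7 (group B, found on newer FSL
 * MPICs) sit MPIC_TIMER_GROUP_STRIDE past group A, so the offset is built
 * from the group number (tm >> 2) and the slot within the group (tm & 3).
 */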
 224static inline unsigned int mpic_tm_offset(struct mpic *mpic, unsigned int tm)
 225{
 226        return (tm >> 2) * MPIC_TIMER_GROUP_STRIDE +
 227               (tm & 3) * MPIC_INFO(TIMER_STRIDE);
 228}
 229
 230static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
 231{
 232        unsigned int offset = mpic_tm_offset(mpic, tm) +
 233                              MPIC_INFO(TIMER_VECTOR_PRI);
 234
 235        return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
 236}
 237
 238static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
 239{
 240        unsigned int offset = mpic_tm_offset(mpic, tm) +
 241                              MPIC_INFO(TIMER_VECTOR_PRI);
 242
 243        _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
 244}
 245
 246static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
 247{
 248        unsigned int cpu = mpic_processor_id(mpic);
 249
 250        return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
 251}
 252
 253static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
 254{
 255        unsigned int cpu = mpic_processor_id(mpic);
 256
 257        _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
 258}
 259
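/*
 * Sources are spread across one or more ISUs (Interrupt Source Units).  The
 * source number splits into an ISU index (high bits) and an index within
 * that ISU (low bits) via the isu_shift/isu_mask pair computed in
 * mpic_alloc().  For example, with an ISU size of 16, source 21 lives in
 * ISU 1 at index 5.
 */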
 260static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
 261{
 262        unsigned int    isu = src_no >> mpic->isu_shift;
 263        unsigned int    idx = src_no & mpic->isu_mask;
 264        unsigned int    val;
 265
 266        val = _mpic_read(mpic->reg_type, &mpic->isus[isu],
 267                         reg + (idx * MPIC_INFO(IRQ_STRIDE)));
 268#ifdef CONFIG_MPIC_BROKEN_REGREAD
 269        if (reg == 0)
 270                val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) |
 271                        mpic->isu_reg0_shadow[src_no];
 272#endif
 273        return val;
 274}
 275
 276static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
 277                                   unsigned int reg, u32 value)
 278{
 279        unsigned int    isu = src_no >> mpic->isu_shift;
 280        unsigned int    idx = src_no & mpic->isu_mask;
 281
 282        _mpic_write(mpic->reg_type, &mpic->isus[isu],
 283                    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
 284
 285#ifdef CONFIG_MPIC_BROKEN_REGREAD
 286        if (reg == 0)
 287                mpic->isu_reg0_shadow[src_no] =
 288                        value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY);
 289#endif
 290}
 291
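/*
 * Shorthand wrappers: these assume a local variable named 'mpic' is in
 * scope at the call site and just forward to the _mpic_* accessors above.
 */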
 292#define mpic_read(b,r)          _mpic_read(mpic->reg_type,&(b),(r))
 293#define mpic_write(b,r,v)       _mpic_write(mpic->reg_type,&(b),(r),(v))
 294#define mpic_ipi_read(i)        _mpic_ipi_read(mpic,(i))
 295#define mpic_ipi_write(i,v)     _mpic_ipi_write(mpic,(i),(v))
 296#define mpic_tm_read(i)         _mpic_tm_read(mpic,(i))
 297#define mpic_tm_write(i,v)      _mpic_tm_write(mpic,(i),(v))
 298#define mpic_cpu_read(i)        _mpic_cpu_read(mpic,(i))
 299#define mpic_cpu_write(i,v)     _mpic_cpu_write(mpic,(i),(v))
 300#define mpic_irq_read(s,r)      _mpic_irq_read(mpic,(s),(r))
 301#define mpic_irq_write(s,r,v)   _mpic_irq_write(mpic,(s),(r),(v))
 302
 303
 304/*
 305 * Low level utility functions
 306 */
 307
 308
 309static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
 310                           struct mpic_reg_bank *rb, unsigned int offset,
 311                           unsigned int size)
 312{
 313        rb->base = ioremap(phys_addr + offset, size);
 314        BUG_ON(rb->base == NULL);
 315}
 316
 317#ifdef CONFIG_PPC_DCR
 318static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
 319                          unsigned int offset, unsigned int size)
 320{
 321        phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
 322        rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
 323        BUG_ON(!DCR_MAP_OK(rb->dhost));
 324}
 325
 326static inline void mpic_map(struct mpic *mpic,
 327                            phys_addr_t phys_addr, struct mpic_reg_bank *rb,
 328                            unsigned int offset, unsigned int size)
 329{
 330        if (mpic->flags & MPIC_USES_DCR)
 331                _mpic_map_dcr(mpic, rb, offset, size);
 332        else
 333                _mpic_map_mmio(mpic, phys_addr, rb, offset, size);
 334}
  335#else /* !CONFIG_PPC_DCR */
  336#define mpic_map(m,p,b,o,s)     _mpic_map_mmio(m,p,b,o,s)
  337#endif /* CONFIG_PPC_DCR */
 338
 339
 340
 341/* Check if we have one of those nice broken MPICs with a flipped endian on
 342 * reads from IPI registers
 343 */
 344static void __init mpic_test_broken_ipi(struct mpic *mpic)
 345{
 346        u32 r;
 347
 348        mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
 349        r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
 350
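        /*
         * If the value reads back byte-swapped rather than as written, the
         * IPI vector/priority registers are reverse-endian on read and
         * _mpic_ipi_read() must flip the access type to compensate.
         */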
 351        if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
 352                printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
 353                mpic->flags |= MPIC_BROKEN_IPI;
 354        }
 355}
 356
 357#ifdef CONFIG_MPIC_U3_HT_IRQS
 358
 359/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 360 * to force the edge setting on the MPIC and do the ack workaround.
 361 */
 362static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
 363{
 364        if (source >= 128 || !mpic->fixups)
 365                return 0;
 366        return mpic->fixups[source].base != NULL;
 367}
 368
 369
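/*
 * HT fixups are driven through an index/data register pair in the device's
 * HT interrupt capability: the index is written to fixup->base + 2 and the
 * value to fixup->base + 4.  Apple bridges instead expose a write-to-ack
 * register block (fixup->applebase).
 */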
 370static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
 371{
 372        struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 373
 374        if (fixup->applebase) {
 375                unsigned int soff = (fixup->index >> 3) & ~3;
 376                unsigned int mask = 1U << (fixup->index & 0x1f);
 377                writel(mask, fixup->applebase + soff);
 378        } else {
 379                raw_spin_lock(&mpic->fixup_lock);
 380                writeb(0x11 + 2 * fixup->index, fixup->base + 2);
 381                writel(fixup->data, fixup->base + 4);
 382                raw_spin_unlock(&mpic->fixup_lock);
 383        }
 384}
 385
 386static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
 387                                      bool level)
 388{
 389        struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 390        unsigned long flags;
 391        u32 tmp;
 392
 393        if (fixup->base == NULL)
 394                return;
 395
 396        DBG("startup_ht_interrupt(0x%x) index: %d\n",
 397            source, fixup->index);
 398        raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
 399        /* Enable and configure */
 400        writeb(0x10 + 2 * fixup->index, fixup->base + 2);
 401        tmp = readl(fixup->base + 4);
 402        tmp &= ~(0x23U);
 403        if (level)
 404                tmp |= 0x22;
 405        writel(tmp, fixup->base + 4);
 406        raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
 407
 408#ifdef CONFIG_PM
  409        /* The saved low bit is the inverse of the HW mask bit:
  410         * set if this fixup was enabled, clear otherwise */
 411        mpic->save_data[source].fixup_data = tmp | 1;
 412#endif
 413}
 414
 415static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
 416{
 417        struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 418        unsigned long flags;
 419        u32 tmp;
 420
 421        if (fixup->base == NULL)
 422                return;
 423
 424        DBG("shutdown_ht_interrupt(0x%x)\n", source);
 425
 426        /* Disable */
 427        raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
 428        writeb(0x10 + 2 * fixup->index, fixup->base + 2);
 429        tmp = readl(fixup->base + 4);
 430        tmp |= 1;
 431        writel(tmp, fixup->base + 4);
 432        raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
 433
 434#ifdef CONFIG_PM
  435        /* The saved low bit is the inverse of the HW mask bit:
  436         * set if this fixup was enabled, clear otherwise */
 437        mpic->save_data[source].fixup_data = tmp & ~1;
 438#endif
 439}
 440
 441#ifdef CONFIG_PCI_MSI
 442static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
 443                                    unsigned int devfn)
 444{
 445        u8 __iomem *base;
 446        u8 pos, flags;
 447        u64 addr = 0;
 448
 449        for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
 450             pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
 451                u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
 452                if (id == PCI_CAP_ID_HT) {
 453                        id = readb(devbase + pos + 3);
 454                        if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING)
 455                                break;
 456                }
 457        }
 458
 459        if (pos == 0)
 460                return;
 461
 462        base = devbase + pos;
 463
 464        flags = readb(base + HT_MSI_FLAGS);
 465        if (!(flags & HT_MSI_FLAGS_FIXED)) {
 466                addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK;
 467                addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
 468        }
 469
 470        printk(KERN_DEBUG "mpic:   - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
 471                PCI_SLOT(devfn), PCI_FUNC(devfn),
 472                flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);
 473
 474        if (!(flags & HT_MSI_FLAGS_ENABLE))
 475                writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
 476}
 477#else
 478static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
 479                                    unsigned int devfn)
 480{
 481        return;
 482}
 483#endif
 484
 485static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
 486                                    unsigned int devfn, u32 vdid)
 487{
 488        int i, irq, n;
 489        u8 __iomem *base;
 490        u32 tmp;
 491        u8 pos;
 492
 493        for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
 494             pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
 495                u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
 496                if (id == PCI_CAP_ID_HT) {
 497                        id = readb(devbase + pos + 3);
 498                        if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ)
 499                                break;
 500                }
 501        }
 502        if (pos == 0)
 503                return;
 504
 505        base = devbase + pos;
 506        writeb(0x01, base + 2);
 507        n = (readl(base + 4) >> 16) & 0xff;
 508
 509        printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
 510               " has %d irqs\n",
 511               devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
 512
 513        for (i = 0; i <= n; i++) {
 514                writeb(0x10 + 2 * i, base + 2);
 515                tmp = readl(base + 4);
 516                irq = (tmp >> 16) & 0xff;
 517                DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
  518                /* mask it, will be unmasked later */
 519                tmp |= 0x1;
 520                writel(tmp, base + 4);
 521                mpic->fixups[irq].index = i;
 522                mpic->fixups[irq].base = base;
 523                /* Apple HT PIC has a non-standard way of doing EOIs */
 524                if ((vdid & 0xffff) == 0x106b)
 525                        mpic->fixups[irq].applebase = devbase + 0x60;
 526                else
 527                        mpic->fixups[irq].applebase = NULL;
 528                writeb(0x11 + 2 * i, base + 2);
 529                mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
 530        }
 531}
  532
 533
 534static void __init mpic_scan_ht_pics(struct mpic *mpic)
 535{
 536        unsigned int devfn;
 537        u8 __iomem *cfgspace;
 538
 539        printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
 540
 541        /* Allocate fixups array */
 542        mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
 543        BUG_ON(mpic->fixups == NULL);
 544
 545        /* Init spinlock */
 546        raw_spin_lock_init(&mpic->fixup_lock);
 547
 548        /* Map U3 config space. We assume all IO-APICs are on the primary bus
 549         * so we only need to map 64kB.
 550         */
 551        cfgspace = ioremap(0xf2000000, 0x10000);
 552        BUG_ON(cfgspace == NULL);
 553
 554        /* Now we scan all slots. We do a very quick scan, we read the header
 555         * type, vendor ID and device ID only, that's plenty enough
 556         */
 557        for (devfn = 0; devfn < 0x100; devfn++) {
 558                u8 __iomem *devbase = cfgspace + (devfn << 8);
 559                u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
 560                u32 l = readl(devbase + PCI_VENDOR_ID);
 561                u16 s;
 562
 563                DBG("devfn %x, l: %x\n", devfn, l);
 564
 565                /* If no device, skip */
 566                if (l == 0xffffffff || l == 0x00000000 ||
 567                    l == 0x0000ffff || l == 0xffff0000)
 568                        goto next;
  569                /* Check if it supports capability lists */
 570                s = readw(devbase + PCI_STATUS);
 571                if (!(s & PCI_STATUS_CAP_LIST))
 572                        goto next;
 573
 574                mpic_scan_ht_pic(mpic, devbase, devfn, l);
 575                mpic_scan_ht_msi(mpic, devbase, devfn);
 576
 577        next:
  578                /* if function 0 of a single-function device, skip its other functions */
 579                if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
 580                        devfn += 7;
 581        }
 582}
 583
 584#else /* CONFIG_MPIC_U3_HT_IRQS */
 585
 586static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
 587{
 588        return 0;
 589}
 590
 591static void __init mpic_scan_ht_pics(struct mpic *mpic)
 592{
 593}
 594
 595#endif /* CONFIG_MPIC_U3_HT_IRQS */
 596
 597/* Find an mpic associated with a given linux interrupt */
 598static struct mpic *mpic_find(unsigned int irq)
 599{
 600        if (irq < NUM_ISA_INTERRUPTS)
 601                return NULL;
 602
 603        return irq_get_chip_data(irq);
 604}
 605
  606/* Determine if the hw irq vector is an IPI */
 607static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
 608{
 609        return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
 610}
 611
  612/* Determine if the hw irq vector is a timer */
 613static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
 614{
 615        return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
 616}
 617
 618/* Convert a cpu mask from logical to physical cpu numbers. */
 619static inline u32 mpic_physmask(u32 cpumask)
 620{
 621        int i;
 622        u32 mask = 0;
 623
 624        for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
 625                mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
 626        return mask;
 627}
 628
 629#ifdef CONFIG_SMP
 630/* Get the mpic structure from the IPI number */
 631static inline struct mpic * mpic_from_ipi(struct irq_data *d)
 632{
 633        return irq_data_get_irq_chip_data(d);
 634}
 635#endif
 636
 637/* Get the mpic structure from the irq number */
 638static inline struct mpic * mpic_from_irq(unsigned int irq)
 639{
 640        return irq_get_chip_data(irq);
 641}
 642
 643/* Get the mpic structure from the irq data */
 644static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
 645{
 646        return irq_data_get_irq_chip_data(d);
 647}
 648
 649/* Send an EOI */
 650static inline void mpic_eoi(struct mpic *mpic)
 651{
 652        mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
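        /* Dummy read of WHOAMI to make sure the EOI write reached the PIC */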
 653        (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
 654}
 655
 656/*
 657 * Linux descriptor level callbacks
 658 */
 659
 660
 661void mpic_unmask_irq(struct irq_data *d)
 662{
 663        unsigned int loops = 100000;
 664        struct mpic *mpic = mpic_from_irq_data(d);
 665        unsigned int src = irqd_to_hwirq(d);
 666
 667        DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
 668
 669        mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
 670                       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
 671                       ~MPIC_VECPRI_MASK);
  672        /* make sure the unmask has reached the controller before we return */
 673        do {
 674                if (!loops--) {
 675                        printk(KERN_ERR "%s: timeout on hwirq %u\n",
 676                               __func__, src);
 677                        break;
 678                }
 679        } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
 680}
 681
 682void mpic_mask_irq(struct irq_data *d)
 683{
 684        unsigned int loops = 100000;
 685        struct mpic *mpic = mpic_from_irq_data(d);
 686        unsigned int src = irqd_to_hwirq(d);
 687
 688        DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
 689
 690        mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
 691                       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
 692                       MPIC_VECPRI_MASK);
 693
  694        /* make sure the mask has reached the controller before we return */
 695        do {
 696                if (!loops--) {
 697                        printk(KERN_ERR "%s: timeout on hwirq %u\n",
 698                               __func__, src);
 699                        break;
 700                }
 701        } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
 702}
 703
 704void mpic_end_irq(struct irq_data *d)
 705{
 706        struct mpic *mpic = mpic_from_irq_data(d);
 707
 708#ifdef DEBUG_IRQ
 709        DBG("%s: end_irq: %d\n", mpic->name, d->irq);
 710#endif
 711        /* We always EOI on end_irq() even for edge interrupts since that
 712         * should only lower the priority, the MPIC should have properly
 713         * latched another edge interrupt coming in anyway
 714         */
 715
 716        mpic_eoi(mpic);
 717}
 718
 719#ifdef CONFIG_MPIC_U3_HT_IRQS
 720
 721static void mpic_unmask_ht_irq(struct irq_data *d)
 722{
 723        struct mpic *mpic = mpic_from_irq_data(d);
 724        unsigned int src = irqd_to_hwirq(d);
 725
 726        mpic_unmask_irq(d);
 727
 728        if (irqd_is_level_type(d))
 729                mpic_ht_end_irq(mpic, src);
 730}
 731
 732static unsigned int mpic_startup_ht_irq(struct irq_data *d)
 733{
 734        struct mpic *mpic = mpic_from_irq_data(d);
 735        unsigned int src = irqd_to_hwirq(d);
 736
 737        mpic_unmask_irq(d);
 738        mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
 739
 740        return 0;
 741}
 742
 743static void mpic_shutdown_ht_irq(struct irq_data *d)
 744{
 745        struct mpic *mpic = mpic_from_irq_data(d);
 746        unsigned int src = irqd_to_hwirq(d);
 747
 748        mpic_shutdown_ht_interrupt(mpic, src);
 749        mpic_mask_irq(d);
 750}
 751
 752static void mpic_end_ht_irq(struct irq_data *d)
 753{
 754        struct mpic *mpic = mpic_from_irq_data(d);
 755        unsigned int src = irqd_to_hwirq(d);
 756
 757#ifdef DEBUG_IRQ
 758        DBG("%s: end_irq: %d\n", mpic->name, d->irq);
 759#endif
 760        /* We always EOI on end_irq() even for edge interrupts since that
 761         * should only lower the priority, the MPIC should have properly
 762         * latched another edge interrupt coming in anyway
 763         */
 764
 765        if (irqd_is_level_type(d))
 766                mpic_ht_end_irq(mpic, src);
 767        mpic_eoi(mpic);
 768}
  769#endif /* CONFIG_MPIC_U3_HT_IRQS */
 770
 771#ifdef CONFIG_SMP
 772
 773static void mpic_unmask_ipi(struct irq_data *d)
 774{
 775        struct mpic *mpic = mpic_from_ipi(d);
 776        unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];
 777
 778        DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
 779        mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
 780}
 781
 782static void mpic_mask_ipi(struct irq_data *d)
 783{
 784        /* NEVER disable an IPI... that's just plain wrong! */
 785}
 786
 787static void mpic_end_ipi(struct irq_data *d)
 788{
 789        struct mpic *mpic = mpic_from_ipi(d);
 790
 791        /*
 792         * IPIs are marked IRQ_PER_CPU. This has the side effect of
 793         * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
 794         * applying to them. We EOI them late to avoid re-entering.
 795         */
 796        mpic_eoi(mpic);
 797}
 798
 799#endif /* CONFIG_SMP */
 800
 801static void mpic_unmask_tm(struct irq_data *d)
 802{
 803        struct mpic *mpic = mpic_from_irq_data(d);
 804        unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
 805
 806        DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
 807        mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
 808        mpic_tm_read(src);
 809}
 810
 811static void mpic_mask_tm(struct irq_data *d)
 812{
 813        struct mpic *mpic = mpic_from_irq_data(d);
 814        unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
 815
 816        mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
 817        mpic_tm_read(src);
 818}
 819
 820int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 821                      bool force)
 822{
 823        struct mpic *mpic = mpic_from_irq_data(d);
 824        unsigned int src = irqd_to_hwirq(d);
 825
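        /*
         * MPICs flagged MPIC_SINGLE_DEST_CPU can route a source to only one
         * CPU, so pick a single target; otherwise program the physical mask
         * of the requested CPUs that are currently online.
         */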
 826        if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
 827                int cpuid = irq_choose_cpu(cpumask);
 828
 829                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
 830        } else {
 831                u32 mask = cpumask_bits(cpumask)[0];
 832
 833                mask &= cpumask_bits(cpu_online_mask)[0];
 834
 835                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
 836                               mpic_physmask(mask));
 837        }
 838
 839        return 0;
 840}
 841
 842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
 843{
 844        /* Now convert sense value */
 845        switch(type & IRQ_TYPE_SENSE_MASK) {
 846        case IRQ_TYPE_EDGE_RISING:
 847                return MPIC_INFO(VECPRI_SENSE_EDGE) |
 848                       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
 849        case IRQ_TYPE_EDGE_FALLING:
 850        case IRQ_TYPE_EDGE_BOTH:
 851                return MPIC_INFO(VECPRI_SENSE_EDGE) |
 852                       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
 853        case IRQ_TYPE_LEVEL_HIGH:
 854                return MPIC_INFO(VECPRI_SENSE_LEVEL) |
 855                       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
 856        case IRQ_TYPE_LEVEL_LOW:
 857        default:
 858                return MPIC_INFO(VECPRI_SENSE_LEVEL) |
 859                       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
 860        }
 861}
 862
 863int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 864{
 865        struct mpic *mpic = mpic_from_irq_data(d);
 866        unsigned int src = irqd_to_hwirq(d);
 867        unsigned int vecpri, vold, vnew;
 868
 869        DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
 870            mpic, d->irq, src, flow_type);
 871
 872        if (src >= mpic->num_sources)
 873                return -EINVAL;
 874
 875        vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
 876
 877        /* We don't support "none" type */
 878        if (flow_type == IRQ_TYPE_NONE)
 879                flow_type = IRQ_TYPE_DEFAULT;
 880
 881        /* Default: read HW settings */
 882        if (flow_type == IRQ_TYPE_DEFAULT) {
 883                switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
 884                               MPIC_INFO(VECPRI_SENSE_MASK))) {
 885                        case MPIC_INFO(VECPRI_SENSE_EDGE) |
 886                             MPIC_INFO(VECPRI_POLARITY_POSITIVE):
 887                                flow_type = IRQ_TYPE_EDGE_RISING;
 888                                break;
 889                        case MPIC_INFO(VECPRI_SENSE_EDGE) |
 890                             MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
 891                                flow_type = IRQ_TYPE_EDGE_FALLING;
 892                                break;
 893                        case MPIC_INFO(VECPRI_SENSE_LEVEL) |
 894                             MPIC_INFO(VECPRI_POLARITY_POSITIVE):
 895                                flow_type = IRQ_TYPE_LEVEL_HIGH;
 896                                break;
 897                        case MPIC_INFO(VECPRI_SENSE_LEVEL) |
 898                             MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
 899                                flow_type = IRQ_TYPE_LEVEL_LOW;
 900                                break;
 901                }
 902        }
 903
 904        /* Apply to irq desc */
 905        irqd_set_trigger_type(d, flow_type);
 906
 907        /* Apply to HW */
 908        if (mpic_is_ht_interrupt(mpic, src))
 909                vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
 910                        MPIC_VECPRI_SENSE_EDGE;
 911        else
 912                vecpri = mpic_type_to_vecpri(mpic, flow_type);
 913
 914        vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
 915                        MPIC_INFO(VECPRI_SENSE_MASK));
 916        vnew |= vecpri;
 917        if (vold != vnew)
 918                mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
 919
 920        return IRQ_SET_MASK_OK_NOCOPY;
 921}
 922
 923void mpic_set_vector(unsigned int virq, unsigned int vector)
 924{
 925        struct mpic *mpic = mpic_from_irq(virq);
 926        unsigned int src = virq_to_hw(virq);
 927        unsigned int vecpri;
 928
 929        DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
 930            mpic, virq, src, vector);
 931
 932        if (src >= mpic->num_sources)
 933                return;
 934
 935        vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
 936        vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK);
 937        vecpri |= vector;
 938        mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
 939}
 940
 941void mpic_set_destination(unsigned int virq, unsigned int cpuid)
 942{
 943        struct mpic *mpic = mpic_from_irq(virq);
 944        unsigned int src = virq_to_hw(virq);
 945
 946        DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
 947            mpic, virq, src, cpuid);
 948
 949        if (src >= mpic->num_sources)
 950                return;
 951
 952        mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
 953}
 954
 955static struct irq_chip mpic_irq_chip = {
 956        .irq_mask       = mpic_mask_irq,
 957        .irq_unmask     = mpic_unmask_irq,
 958        .irq_eoi        = mpic_end_irq,
 959        .irq_set_type   = mpic_set_irq_type,
 960};
 961
 962#ifdef CONFIG_SMP
 963static struct irq_chip mpic_ipi_chip = {
 964        .irq_mask       = mpic_mask_ipi,
 965        .irq_unmask     = mpic_unmask_ipi,
 966        .irq_eoi        = mpic_end_ipi,
 967};
 968#endif /* CONFIG_SMP */
 969
 970static struct irq_chip mpic_tm_chip = {
 971        .irq_mask       = mpic_mask_tm,
 972        .irq_unmask     = mpic_unmask_tm,
 973        .irq_eoi        = mpic_end_irq,
 974};
 975
 976#ifdef CONFIG_MPIC_U3_HT_IRQS
 977static struct irq_chip mpic_irq_ht_chip = {
 978        .irq_startup    = mpic_startup_ht_irq,
 979        .irq_shutdown   = mpic_shutdown_ht_irq,
 980        .irq_mask       = mpic_mask_irq,
 981        .irq_unmask     = mpic_unmask_ht_irq,
 982        .irq_eoi        = mpic_end_ht_irq,
 983        .irq_set_type   = mpic_set_irq_type,
 984};
 985#endif /* CONFIG_MPIC_U3_HT_IRQS */
 986
 987
 988static int mpic_host_match(struct irq_domain *h, struct device_node *node)
 989{
 990        /* Exact match, unless mpic node is NULL */
 991        return h->of_node == NULL || h->of_node == node;
 992}
 993
 994static int mpic_host_map(struct irq_domain *h, unsigned int virq,
 995                         irq_hw_number_t hw)
 996{
 997        struct mpic *mpic = h->host_data;
 998        struct irq_chip *chip;
 999
1000        DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);
1001
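        /*
         * Mapping order: reject the spurious and protected vectors, then
         * check the reserved IPI, timer and (FSL) error-interrupt ranges
         * before treating hw as a normal interrupt source.
         */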
1002        if (hw == mpic->spurious_vec)
1003                return -EINVAL;
1004        if (mpic->protected && test_bit(hw, mpic->protected))
1005                return -EINVAL;
1006
1007#ifdef CONFIG_SMP
1008        else if (hw >= mpic->ipi_vecs[0]) {
1009                WARN_ON(mpic->flags & MPIC_SECONDARY);
1010
1011                DBG("mpic: mapping as IPI\n");
1012                irq_set_chip_data(virq, mpic);
1013                irq_set_chip_and_handler(virq, &mpic->hc_ipi,
1014                                         handle_percpu_irq);
1015                return 0;
1016        }
1017#endif /* CONFIG_SMP */
1018
1019        if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
1020                WARN_ON(mpic->flags & MPIC_SECONDARY);
1021
1022                DBG("mpic: mapping as timer\n");
1023                irq_set_chip_data(virq, mpic);
1024                irq_set_chip_and_handler(virq, &mpic->hc_tm,
1025                                         handle_fasteoi_irq);
1026                return 0;
1027        }
1028
1029        if (mpic_map_error_int(mpic, virq, hw))
1030                return 0;
1031
1032        if (hw >= mpic->num_sources)
1033                return -EINVAL;
1034
1035        mpic_msi_reserve_hwirq(mpic, hw);
1036
1037        /* Default chip */
1038        chip = &mpic->hc_irq;
1039
1040#ifdef CONFIG_MPIC_U3_HT_IRQS
1041        /* Check for HT interrupts, override vecpri */
1042        if (mpic_is_ht_interrupt(mpic, hw))
1043                chip = &mpic->hc_ht_irq;
1044#endif /* CONFIG_MPIC_U3_HT_IRQS */
1045
1046        DBG("mpic: mapping to irq chip @%p\n", chip);
1047
1048        irq_set_chip_data(virq, mpic);
1049        irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
1050
1051        /* Set default irq type */
1052        irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
1053
1054        /* If the MPIC was reset, then all vectors have already been
1055         * initialized.  Otherwise, a per source lazy initialization
1056         * is done here.
1057         */
1058        if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
1059                mpic_set_vector(virq, hw);
1060                mpic_set_destination(virq, mpic_processor_id(mpic));
1061                mpic_irq_set_priority(virq, 8);
1062        }
1063
1064        return 0;
1065}
1066
1067static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
1068                           const u32 *intspec, unsigned int intsize,
1069                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1070
1071{
1072        struct mpic *mpic = h->host_data;
1073        static unsigned char map_mpic_senses[4] = {
1074                IRQ_TYPE_EDGE_RISING,
1075                IRQ_TYPE_LEVEL_LOW,
1076                IRQ_TYPE_LEVEL_HIGH,
1077                IRQ_TYPE_EDGE_FALLING,
1078        };
1079
1080        *out_hwirq = intspec[0];
1081        if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
1082                /*
1083                 * Freescale MPIC with extended intspec:
1084                 * First two cells are as usual.  Third specifies
1085                 * an "interrupt type".  Fourth is type-specific data.
1086                 *
1087                 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
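                 *
                 * Illustrative examples (per that binding):
                 *   <16 2 0 0>  external source 16, level high
                 *   <0 0 3 0>   MPIC timer 0
                 *   <0 0 1 2>   error interrupt 2 (MPIC 4.1+ with EIMR)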
1088                 */
1089                switch (intspec[2]) {
1090                case 0:
1091                        break;
1092                case 1:
1093                        if (!(mpic->flags & MPIC_FSL_HAS_EIMR))
1094                                break;
1095
1096                        if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs))
1097                                return -EINVAL;
1098
1099                        *out_hwirq = mpic->err_int_vecs[intspec[3]];
1100
1101                        break;
1102                case 2:
1103                        if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
1104                                return -EINVAL;
1105
1106                        *out_hwirq = mpic->ipi_vecs[intspec[0]];
1107                        break;
1108                case 3:
1109                        if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
1110                                return -EINVAL;
1111
1112                        *out_hwirq = mpic->timer_vecs[intspec[0]];
1113                        break;
1114                default:
1115                        pr_debug("%s: unknown irq type %u\n",
1116                                 __func__, intspec[2]);
1117                        return -EINVAL;
1118                }
1119
1120                *out_flags = map_mpic_senses[intspec[1] & 3];
1121        } else if (intsize > 1) {
1122                u32 mask = 0x3;
1123
1124                /* Apple invented a new race of encoding on machines with
1125                 * an HT APIC. They encode, among others, the index within
1126                 * the HT APIC. We don't care about it here since thankfully,
1127                 * it appears that they have the APIC already properly
1128                 * configured, and thus our current fixup code that reads the
1129                 * APIC config works fine. However, we still need to mask out
1130                 * bits in the specifier to make sure we only get bit 0 which
1131                 * is the level/edge bit (the only sense bit exposed by Apple),
1132                 * as their bit 1 means something else.
1133                 */
1134                if (machine_is(powermac))
1135                        mask = 0x1;
1136                *out_flags = map_mpic_senses[intspec[1] & mask];
1137        } else
1138                *out_flags = IRQ_TYPE_NONE;
1139
1140        DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
1141            intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);
1142
1143        return 0;
1144}
1145
1146/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
1147static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
1148{
1149        struct irq_chip *chip = irq_desc_get_chip(desc);
1150        struct mpic *mpic = irq_desc_get_handler_data(desc);
1151        unsigned int virq;
1152
1153        BUG_ON(!(mpic->flags & MPIC_SECONDARY));
1154
1155        virq = mpic_get_one_irq(mpic);
1156        if (virq)
1157                generic_handle_irq(virq);
1158
1159        chip->irq_eoi(&desc->irq_data);
1160}
1161
1162static struct irq_domain_ops mpic_host_ops = {
1163        .match = mpic_host_match,
1164        .map = mpic_host_map,
1165        .xlate = mpic_host_xlate,
1166};
1167
1168/*
1169 * Exported functions
1170 */
1171
1172struct mpic * __init mpic_alloc(struct device_node *node,
1173                                phys_addr_t phys_addr,
1174                                unsigned int flags,
1175                                unsigned int isu_size,
1176                                unsigned int irq_count,
1177                                const char *name)
1178{
1179        int i, psize, intvec_top;
1180        struct mpic *mpic;
1181        u32 greg_feature;
1182        const char *vers;
1183        const u32 *psrc;
1184        u32 last_irq;
1185
1186        /* Default MPIC search parameters */
1187        static const struct of_device_id __initconst mpic_device_id[] = {
1188                { .type       = "open-pic", },
1189                { .compatible = "open-pic", },
1190                {},
1191        };
1192
1193        /*
1194         * If we were not passed a device-tree node, then perform the default
 1195         * search for a standardized OpenPIC.
1196         */
1197        if (node) {
1198                node = of_node_get(node);
1199        } else {
1200                node = of_find_matching_node(NULL, mpic_device_id);
1201                if (!node)
1202                        return NULL;
1203        }
1204
1205        /* Pick the physical address from the device tree if unspecified */
1206        if (!phys_addr) {
1207                /* Check if it is DCR-based */
1208                if (of_get_property(node, "dcr-reg", NULL)) {
1209                        flags |= MPIC_USES_DCR;
1210                } else {
1211                        struct resource r;
1212                        if (of_address_to_resource(node, 0, &r))
1213                                goto err_of_node_put;
1214                        phys_addr = r.start;
1215                }
1216        }
1217
1218        /* Read extra device-tree properties into the flags variable */
1219        if (of_get_property(node, "big-endian", NULL))
1220                flags |= MPIC_BIG_ENDIAN;
1221        if (of_get_property(node, "pic-no-reset", NULL))
1222                flags |= MPIC_NO_RESET;
1223        if (of_get_property(node, "single-cpu-affinity", NULL))
1224                flags |= MPIC_SINGLE_DEST_CPU;
1225        if (of_device_is_compatible(node, "fsl,mpic"))
1226                flags |= MPIC_FSL | MPIC_LARGE_VECTORS;
1227
1228        mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
1229        if (mpic == NULL)
1230                goto err_of_node_put;
1231
1232        mpic->name = name;
1233        mpic->node = node;
1234        mpic->paddr = phys_addr;
1235        mpic->flags = flags;
1236
1237        mpic->hc_irq = mpic_irq_chip;
1238        mpic->hc_irq.name = name;
1239        if (!(mpic->flags & MPIC_SECONDARY))
1240                mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
1241#ifdef CONFIG_MPIC_U3_HT_IRQS
1242        mpic->hc_ht_irq = mpic_irq_ht_chip;
1243        mpic->hc_ht_irq.name = name;
1244        if (!(mpic->flags & MPIC_SECONDARY))
1245                mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
1246#endif /* CONFIG_MPIC_U3_HT_IRQS */
1247
1248#ifdef CONFIG_SMP
1249        mpic->hc_ipi = mpic_ipi_chip;
1250        mpic->hc_ipi.name = name;
1251#endif /* CONFIG_SMP */
1252
1253        mpic->hc_tm = mpic_tm_chip;
1254        mpic->hc_tm.name = name;
1255
1256        mpic->num_sources = 0; /* so far */
1257
1258        if (mpic->flags & MPIC_LARGE_VECTORS)
1259                intvec_top = 2047;
1260        else
1261                intvec_top = 255;
1262
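        /*
         * Reserve the top of the vector space for internally generated
         * interrupts: 8 timer vectors, 4 IPI vectors and the spurious
         * vector (intvec_top - 12 .. intvec_top).
         */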
1263        mpic->timer_vecs[0] = intvec_top - 12;
1264        mpic->timer_vecs[1] = intvec_top - 11;
1265        mpic->timer_vecs[2] = intvec_top - 10;
1266        mpic->timer_vecs[3] = intvec_top - 9;
1267        mpic->timer_vecs[4] = intvec_top - 8;
1268        mpic->timer_vecs[5] = intvec_top - 7;
1269        mpic->timer_vecs[6] = intvec_top - 6;
1270        mpic->timer_vecs[7] = intvec_top - 5;
1271        mpic->ipi_vecs[0]   = intvec_top - 4;
1272        mpic->ipi_vecs[1]   = intvec_top - 3;
1273        mpic->ipi_vecs[2]   = intvec_top - 2;
1274        mpic->ipi_vecs[3]   = intvec_top - 1;
1275        mpic->spurious_vec  = intvec_top;
1276
1277        /* Look for protected sources */
1278        psrc = of_get_property(mpic->node, "protected-sources", &psize);
1279        if (psrc) {
1280                /* Allocate a bitmap with one bit per interrupt */
1281                unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
1282                mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
1283                BUG_ON(mpic->protected == NULL);
1284                for (i = 0; i < psize/sizeof(u32); i++) {
1285                        if (psrc[i] > intvec_top)
1286                                continue;
1287                        __set_bit(psrc[i], mpic->protected);
1288                }
1289        }
1290
1291#ifdef CONFIG_MPIC_WEIRD
1292        mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)];
1293#endif
1294
1295        /* default register type */
1296        if (mpic->flags & MPIC_BIG_ENDIAN)
1297                mpic->reg_type = mpic_access_mmio_be;
1298        else
1299                mpic->reg_type = mpic_access_mmio_le;
1300
1301        /*
1302         * An MPIC with a "dcr-reg" property must be accessed that way, but
1303         * only if the kernel includes DCR support.
1304         */
1305#ifdef CONFIG_PPC_DCR
1306        if (mpic->flags & MPIC_USES_DCR)
1307                mpic->reg_type = mpic_access_dcr;
1308#else
1309        BUG_ON(mpic->flags & MPIC_USES_DCR);
1310#endif
1311
1312        /* Map the global registers */
1313        mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
1314        mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
1315
1316        if (mpic->flags & MPIC_FSL) {
1317                u32 brr1, version;
1318                int ret;
1319
1320                /*
1321                 * Yes, Freescale really did put global registers in the
1322                 * magic per-cpu area -- and they don't even show up in the
1323                 * non-magic per-cpu copies that this driver normally uses.
1324                 */
1325                mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs,
1326                         MPIC_CPU_THISBASE, 0x1000);
1327
1328                brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1329                                MPIC_FSL_BRR1);
1330                version = brr1 & MPIC_FSL_BRR1_VER;
1331
1332                /* Error interrupt mask register (EIMR) is required for
1333                 * handling individual device error interrupts. EIMR
1334                 * was added in MPIC version 4.1.
1335                 *
 1336                 * Here we reserve vector number space for error
 1337                 * interrupt vectors. This space is stolen from the
 1338                 * global vector number space, just as it is for IPIs
 1339                 * and timer interrupts.
1340                 *
1341                 * Available vector space = intvec_top - 12, where 12
1342                 * is the number of vectors which have been consumed by
1343                 * ipis and timer interrupts.
1344                 */
1345                if (version >= 0x401) {
1346                        ret = mpic_setup_error_int(mpic, intvec_top - 12);
1347                        if (ret)
1348                                return NULL;
1349                }
1350        }
1351
1352        /* Reset */
1353
1354        /* When using a device-node, reset requests are only honored if the MPIC
1355         * is allowed to reset.
1356         */
1357        if (!(mpic->flags & MPIC_NO_RESET)) {
1358                printk(KERN_DEBUG "mpic: Resetting\n");
1359                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1360                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1361                           | MPIC_GREG_GCONF_RESET);
1362                while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1363                       & MPIC_GREG_GCONF_RESET)
1364                        mb();
1365        }
1366
1367        /* CoreInt */
1368        if (mpic->flags & MPIC_ENABLE_COREINT)
1369                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1370                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1371                           | MPIC_GREG_GCONF_COREINT);
1372
1373        if (mpic->flags & MPIC_ENABLE_MCK)
1374                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1375                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1376                           | MPIC_GREG_GCONF_MCK);
1377
1378        /*
1379         * The MPIC driver will crash if there are more cores than we
1380         * can initialize, so we may as well catch that problem here.
1381         */
1382        BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
1383
1384        /* Map the per-CPU registers */
1385        for_each_possible_cpu(i) {
1386                unsigned int cpu = get_hard_smp_processor_id(i);
1387
1388                mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
1389                         MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
1390                         0x1000);
1391        }
1392
1393        /*
1394         * Read feature register.  For non-ISU MPICs, num sources as well. On
1395         * ISU MPICs, sources are counted as ISUs are added
1396         */
1397        greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
1398
1399        /*
1400         * By default, the last source number comes from the MPIC, but the
1401         * device-tree and board support code can override it on buggy hw.
1402         * If we get passed an isu_size (multi-isu MPIC) then we use that
1403         * as a default instead of the value read from the HW.
1404         */
 1405        last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
 1406                                >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
 1407        if (isu_size)
 1408                last_irq = isu_size * MPIC_MAX_ISU - 1;
1409        of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
1410        if (irq_count)
1411                last_irq = irq_count - 1;
1412
1413        /* Initialize main ISU if none provided */
1414        if (!isu_size) {
1415                isu_size = last_irq + 1;
1416                mpic->num_sources = isu_size;
1417                mpic_map(mpic, mpic->paddr, &mpic->isus[0],
1418                                MPIC_INFO(IRQ_BASE),
1419                                MPIC_INFO(IRQ_STRIDE) * isu_size);
1420        }
1421
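        /*
         * isu_shift is effectively the ceiling of log2(isu_size), so a
         * source number can be split with a shift and a mask even when
         * isu_size is not a power of two (e.g. isu_size 24 -> shift 5,
         * mask 0x1f).
         */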
1422        mpic->isu_size = isu_size;
1423        mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
1424        mpic->isu_mask = (1 << mpic->isu_shift) - 1;
1425
1426        mpic->irqhost = irq_domain_add_linear(mpic->node,
1427                                       intvec_top,
1428                                       &mpic_host_ops, mpic);
1429
1430        /*
1431         * FIXME: The code leaks the MPIC object and mappings here; this
 1432         * is very unlikely to fail but it ought to be fixed anyway.
1433         */
1434        if (mpic->irqhost == NULL)
1435                return NULL;
1436
1437        /* Display version */
1438        switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
1439        case 1:
1440                vers = "1.0";
1441                break;
1442        case 2:
1443                vers = "1.2";
1444                break;
1445        case 3:
1446                vers = "1.3";
1447                break;
1448        default:
1449                vers = "<unknown>";
1450                break;
1451        }
1452        printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
1453               " max %d CPUs\n",
1454               name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
1455        printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
1456               mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
1457
1458        mpic->next = mpics;
1459        mpics = mpic;
1460
1461        if (!(mpic->flags & MPIC_SECONDARY)) {
1462                mpic_primary = mpic;
1463                irq_set_default_host(mpic->irqhost);
1464        }
1465
1466        return mpic;
1467
1468err_of_node_put:
1469        of_node_put(node);
1470        return NULL;
1471}
1472
1473void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
1474                            phys_addr_t paddr)
1475{
1476        unsigned int isu_first = isu_num * mpic->isu_size;
1477
1478        BUG_ON(isu_num >= MPIC_MAX_ISU);
1479
1480        mpic_map(mpic,
1481                 paddr, &mpic->isus[isu_num], 0,
1482                 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
1483
1484        if ((isu_first + mpic->isu_size) > mpic->num_sources)
1485                mpic->num_sources = isu_first + mpic->isu_size;
1486}
1487
1488void __init mpic_init(struct mpic *mpic)
1489{
1490        int i, cpu;
1491        int num_timers = 4;
1492
1493        BUG_ON(mpic->num_sources == 0);
1494
1495        printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
1496
1497        /* Set current processor priority to max */
1498        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1499
1500        if (mpic->flags & MPIC_FSL) {
1501                u32 brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1502                                      MPIC_FSL_BRR1);
1503                u32 version = brr1 & MPIC_FSL_BRR1_VER;
1504
1505                /*
1506                 * Timer group B is present at the latest in MPIC 3.1 (e.g.
1507                 * mpc8536).  It is not present in MPIC 2.0 (e.g. mpc8544).
1508                 * I don't know about the status of intermediate versions (or
1509                 * whether they even exist).
1510                 */
1511                if (version >= 0x0301)
1512                        num_timers = 8;
1513        }
1514
 1515        /* FSL MPIC error interrupt initialization */
1516        if (mpic->flags & MPIC_FSL_HAS_EIMR)
1517                mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
1518
1519        /* Initialize timers to our reserved vectors and mask them for now */
1520        for (i = 0; i < num_timers; i++) {
1521                unsigned int offset = mpic_tm_offset(mpic, i);
1522
1523                mpic_write(mpic->tmregs,
1524                           offset + MPIC_INFO(TIMER_DESTINATION),
1525                           1 << hard_smp_processor_id());
1526                mpic_write(mpic->tmregs,
1527                           offset + MPIC_INFO(TIMER_VECTOR_PRI),
1528                           MPIC_VECPRI_MASK |
1529                           (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
1530                           (mpic->timer_vecs[0] + i));
1531        }
1532
1533        /* Initialize IPIs to our reserved vectors and mark them disabled for now */
1534        mpic_test_broken_ipi(mpic);
1535        for (i = 0; i < 4; i++) {
1536                mpic_ipi_write(i,
1537                               MPIC_VECPRI_MASK |
1538                               (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
1539                               (mpic->ipi_vecs[0] + i));
1540        }
1541
1542        /* Do the HT PIC fixups on U3 broken mpic */
1543        DBG("MPIC flags: %x\n", mpic->flags);
1544        if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
1545                mpic_scan_ht_pics(mpic);
1546                mpic_u3msi_init(mpic);
1547        }
1548
1549        mpic_pasemi_msi_init(mpic);
1550
1551        cpu = mpic_processor_id(mpic);
1552
1553        if (!(mpic->flags & MPIC_NO_RESET)) {
1554                for (i = 0; i < mpic->num_sources; i++) {
1555                        /* start with vector = source number, and masked */
1556                        u32 vecpri = MPIC_VECPRI_MASK | i |
1557                                (8 << MPIC_VECPRI_PRIORITY_SHIFT);
1558
1559                        /* check if protected */
1560                        if (mpic->protected && test_bit(i, mpic->protected))
1561                                continue;
1562                        /* init hw */
1563                        mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
1564                        mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
1565                }
1566        }
1567
1568        /* Init spurious vector */
1569        mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);
1570
1571        /* Disable 8259 passthrough, if supported */
1572        if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
1573                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1574                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1575                           | MPIC_GREG_GCONF_8259_PTHROU_DIS);
1576
1577        if (mpic->flags & MPIC_NO_BIAS)
1578                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1579                        mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1580                        | MPIC_GREG_GCONF_NO_BIAS);
1581
1582        /* Set current processor priority to 0 */
1583        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
1584
1585#ifdef CONFIG_PM
1586        /* allocate memory to save mpic state */
1587        mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
1588                                  GFP_KERNEL);
1589        BUG_ON(mpic->save_data == NULL);
1590#endif
1591
1592        /* Check if this MPIC is chained from a parent interrupt controller */
1593        if (mpic->flags & MPIC_SECONDARY) {
1594                int virq = irq_of_parse_and_map(mpic->node, 0);
1595                if (virq != NO_IRQ) {
1596                        printk(KERN_INFO "%s: hooking up to IRQ %d\n",
1597                                        mpic->node->full_name, virq);
1598                        irq_set_handler_data(virq, mpic);
1599                        irq_set_chained_handler(virq, &mpic_cascade);
1600                }
1601        }
1602}
1603
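/* Program the clock ratio field of the GLOBAL_CONF_1 register */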
1604void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
1605{
1606        u32 v;
1607
1608        v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
1609        v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
1610        v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
1611        mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1612}
1613
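/* Set or clear the serial interrupt enable (SIE) bit in GLOBAL_CONF_1 */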
1614void __init mpic_set_serial_int(struct mpic *mpic, int enable)
1615{
1616        unsigned long flags;
1617        u32 v;
1618
1619        raw_spin_lock_irqsave(&mpic_lock, flags);
1620        v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
1621        if (enable)
1622                v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
1623        else
1624                v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
1625        mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1626        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1627}
1628
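/*
 * Change the priority of a virtual irq.  The source may be a normal
 * interrupt, an IPI or a timer; each class lives in its own register
 * bank.  A hypothetical caller would do e.g.
 * mpic_irq_set_priority(virq, 4) to run that source at priority 4.
 */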
1629void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1630{
1631        struct mpic *mpic = mpic_find(irq);
1632        unsigned int src = virq_to_hw(irq);
1633        unsigned long flags;
1634        u32 reg;
1635
1636        if (!mpic)
1637                return;
1638
1639        raw_spin_lock_irqsave(&mpic_lock, flags);
1640        if (mpic_is_ipi(mpic, src)) {
1641                reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
1642                        ~MPIC_VECPRI_PRIORITY_MASK;
1643                mpic_ipi_write(src - mpic->ipi_vecs[0],
1644                               reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1645        } else if (mpic_is_tm(mpic, src)) {
1646                reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
1647                        ~MPIC_VECPRI_PRIORITY_MASK;
1648                mpic_tm_write(src - mpic->timer_vecs[0],
1649                              reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1650        } else {
1651                reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
1652                        & ~MPIC_VECPRI_PRIORITY_MASK;
1653                mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
1654                               reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1655        }
1656        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1657}
1658
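/*
 * Per-CPU bring-up: optionally (distribute_irqs) add this CPU to every
 * source's destination mask, then drop the current task priority to 0
 * so interrupts can be delivered.
 */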
1659void mpic_setup_this_cpu(void)
1660{
1661#ifdef CONFIG_SMP
1662        struct mpic *mpic = mpic_primary;
1663        unsigned long flags;
1664        u32 msk = 1 << hard_smp_processor_id();
1665        unsigned int i;
1666
1667        BUG_ON(mpic == NULL);
1668
1669        DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1670
1671        raw_spin_lock_irqsave(&mpic_lock, flags);
1672
1673        /* Let the MPIC know we want interrupts. The default affinity is
1674         * 0xffffffff until changed via /proc; that's how it's done on x86.
1675         * If we want different behaviour, we must also change the default
1676         * values of irq_desc[].affinity in irq.c.
1677         */
1678        if (distribute_irqs) {
1679                for (i = 0; i < mpic->num_sources ; i++)
1680                        mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1681                                mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
1682        }
1683
1684        /* Set current processor priority to 0 */
1685        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
1686
1687        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1688#endif /* CONFIG_SMP */
1689}
1690
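/* Read this CPU's current task priority from the primary MPIC */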
1691int mpic_cpu_get_priority(void)
1692{
1693        struct mpic *mpic = mpic_primary;
1694
1695        return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
1696}
1697
1698void mpic_cpu_set_priority(int prio)
1699{
1700        struct mpic *mpic = mpic_primary;
1701
1702        prio &= MPIC_CPU_TASKPRI_MASK;
1703        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
1704}
1705
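/*
 * Per-CPU teardown: remove this CPU from every source's destination
 * mask, raise the task priority back to the maximum and issue a final
 * EOI (see the comment below on why the EOI is needed).
 */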
1706void mpic_teardown_this_cpu(int secondary)
1707{
1708        struct mpic *mpic = mpic_primary;
1709        unsigned long flags;
1710        u32 msk = 1 << hard_smp_processor_id();
1711        unsigned int i;
1712
1713        BUG_ON(mpic == NULL);
1714
1715        DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1716        raw_spin_lock_irqsave(&mpic_lock, flags);
1717
1718        /* Let the MPIC know we no longer want interrupts on this CPU */
1719        for (i = 0; i < mpic->num_sources ; i++)
1720                mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1721                        mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
1722
1723        /* Set current processor priority to max */
1724        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1725        /* We need to EOI the IPI since not all platforms reset the MPIC
1726         * on boot and new interrupts wouldn't get delivered otherwise.
1727         */
1728        mpic_eoi(mpic);
1729
1730        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1731}
1732
1733
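/*
 * Read one pending vector from the given acknowledge register and turn
 * it into a Linux virq.  Spurious and "protected" sources are filtered
 * out and reported as NO_IRQ.
 */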
1734static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
1735{
1736        u32 src;
1737
1738        src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
1739#ifdef DEBUG_LOW
1740        DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
1741#endif
1742        if (unlikely(src == mpic->spurious_vec)) {
1743                if (mpic->flags & MPIC_SPV_EOI)
1744                        mpic_eoi(mpic);
1745                return NO_IRQ;
1746        }
1747        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1748                printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1749                                   mpic->name, (int)src);
1750                mpic_eoi(mpic);
1751                return NO_IRQ;
1752        }
1753
1754        return irq_linear_revmap(mpic->irqhost, src);
1755}
1756
1757unsigned int mpic_get_one_irq(struct mpic *mpic)
1758{
1759        return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
1760}
1761
1762unsigned int mpic_get_irq(void)
1763{
1764        struct mpic *mpic = mpic_primary;
1765
1766        BUG_ON(mpic == NULL);
1767
1768        return mpic_get_one_irq(mpic);
1769}
1770
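/*
 * CoreInt variant (BookE): the vector is taken from the EPR SPR rather
 * than from an INTACK read.
 */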
1771unsigned int mpic_get_coreint_irq(void)
1772{
1773#ifdef CONFIG_BOOKE
1774        struct mpic *mpic = mpic_primary;
1775        u32 src;
1776
1777        BUG_ON(mpic == NULL);
1778
1779        src = mfspr(SPRN_EPR);
1780
1781        if (unlikely(src == mpic->spurious_vec)) {
1782                if (mpic->flags & MPIC_SPV_EOI)
1783                        mpic_eoi(mpic);
1784                return NO_IRQ;
1785        }
1786        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1787                printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1788                                   mpic->name, (int)src);
1789                return NO_IRQ;
1790        }
1791
1792        return irq_linear_revmap(mpic->irqhost, src);
1793#else
1794        return NO_IRQ;
1795#endif
1796}
1797
1798unsigned int mpic_get_mcirq(void)
1799{
1800        struct mpic *mpic = mpic_primary;
1801
1802        BUG_ON(mpic == NULL);
1803
1804        return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
1805}
1806
1807#ifdef CONFIG_SMP
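/* Map the four reserved IPI vectors and register them as SMP message IPIs */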
1808void mpic_request_ipis(void)
1809{
1810        struct mpic *mpic = mpic_primary;
1811        int i;
1812        BUG_ON(mpic == NULL);
1813
1814        printk(KERN_INFO "mpic: requesting IPIs...\n");
1815
1816        for (i = 0; i < 4; i++) {
1817                unsigned int vipi = irq_create_mapping(mpic->irqhost,
1818                                                       mpic->ipi_vecs[0] + i);
1819                if (vipi == NO_IRQ) {
1820                        printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
1821                        continue;
1822                }
1823                smp_request_message_ipi(vipi, i);
1824        }
1825}
1826
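/*
 * Send IPI 'msg' (0..3) to one CPU by writing that CPU's physical mask
 * into the corresponding IPI dispatch register.
 */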
1827void smp_mpic_message_pass(int cpu, int msg)
1828{
1829        struct mpic *mpic = mpic_primary;
1830        u32 physmask;
1831
1832        BUG_ON(mpic == NULL);
1833
1834        /* make sure we're sending something that translates to an IPI */
1835        if ((unsigned int)msg > 3) {
1836                printk(KERN_ERR "SMP %d: smp_message_pass: unknown msg %d\n",
1837                       smp_processor_id(), msg);
1838                return;
1839        }
1840
1841#ifdef DEBUG_IPI
1842        DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
1843#endif
1844
1845        physmask = 1 << get_hard_smp_processor_id(cpu);
1846
1847        mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1848                       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
1849}
1850
1851int __init smp_mpic_probe(void)
1852{
1853        int nr_cpus;
1854
1855        DBG("smp_mpic_probe()...\n");
1856
1857        nr_cpus = cpumask_weight(cpu_possible_mask);
1858
1859        DBG("nr_cpus: %d\n", nr_cpus);
1860
1861        if (nr_cpus > 1)
1862                mpic_request_ipis();
1863
1864        return nr_cpus;
1865}
1866
1867void smp_mpic_setup_cpu(int cpu)
1868{
1869        mpic_setup_this_cpu();
1870}
1871
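/*
 * Pulse the reset bit for one core in the PROCESSOR_INIT register, then
 * (on FSL parts) EOI any interrupts left pending on that core.
 */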
1872void mpic_reset_core(int cpu)
1873{
1874        struct mpic *mpic = mpic_primary;
1875        u32 pir;
1876        int cpuid = get_hard_smp_processor_id(cpu);
1877        int i;
1878
1879        /* Set target bit for core reset */
1880        pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
1881        pir |= (1 << cpuid);
1882        mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
1883        mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
1884
1885        /* Restore target bit after reset complete */
1886        pir &= ~(1 << cpuid);
1887        mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
1888        mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
1889
1890        /* Perform 15 EOIs on each reset core to clear pending interrupts.
1891         * This is required for FSL CoreNet-based devices. */
1892        if (mpic->flags & MPIC_FSL) {
1893                for (i = 0; i < 15; i++) {
1894                        _mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid],
1895                                      MPIC_CPU_EOI, 0);
1896                }
1897        }
1898}
1899#endif /* CONFIG_SMP */
1900
1901#ifdef CONFIG_PM
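/* Save and restore the per-source vector/priority and destination
 * registers across system suspend, via the syscore_ops registered below. */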
1902static void mpic_suspend_one(struct mpic *mpic)
1903{
1904        int i;
1905
1906        for (i = 0; i < mpic->num_sources; i++) {
1907                mpic->save_data[i].vecprio =
1908                        mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
1909                mpic->save_data[i].dest =
1910                        mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
1911        }
1912}
1913
1914static int mpic_suspend(void)
1915{
1916        struct mpic *mpic = mpics;
1917
1918        while (mpic) {
1919                mpic_suspend_one(mpic);
1920                mpic = mpic->next;
1921        }
1922
1923        return 0;
1924}
1925
1926static void mpic_resume_one(struct mpic *mpic)
1927{
1928        int i;
1929
1930        for (i = 0; i < mpic->num_sources; i++) {
1931                mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
1932                               mpic->save_data[i].vecprio);
1933                mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1934                               mpic->save_data[i].dest);
1935
1936#ifdef CONFIG_MPIC_U3_HT_IRQS
1937                if (mpic->fixups) {
1938                        struct mpic_irq_fixup *fixup = &mpic->fixups[i];
1939
1940                        if (fixup->base) {
1941                                /* we use the lowest bit in an inverted meaning */
1942                                if ((mpic->save_data[i].fixup_data & 1) == 0)
1943                                        continue;
1944
1945                                /* Enable and configure */
1946                                writeb(0x10 + 2 * fixup->index, fixup->base + 2);
1947
1948                                writel(mpic->save_data[i].fixup_data & ~1,
1949                                       fixup->base + 4);
1950                        }
1951                }
1952#endif
1953        }
1954}
1955
1956static void mpic_resume(void)
1957{
1958        struct mpic *mpic = mpics;
1959
1960        while (mpic) {
1961                mpic_resume_one(mpic);
1962                mpic = mpic->next;
1963        }
1964}
1965
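/* Suspend/resume hooks run from syscore context, late in the suspend
 * sequence with interrupts disabled. */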
1966static struct syscore_ops mpic_syscore_ops = {
1967        .resume = mpic_resume,
1968        .suspend = mpic_suspend,
1969};
1970
1971static int mpic_init_sys(void)
1972{
1973        register_syscore_ops(&mpic_syscore_ops);
1974        return 0;
1975}
1976
1977device_initcall(mpic_init_sys);
1978#endif
1979