linux/arch/powerpc/sysdev/mpic.c
   1/*
   2 *  arch/powerpc/sysdev/mpic.c
   3 *
   4 *  Driver for interrupt controllers following the OpenPIC standard, the
   5 *  common implementation being IBM's MPIC. This driver can also deal
   6 *  with various broken implementations of this HW.
   7 *
   8 *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
   9 *  Copyright 2010-2012 Freescale Semiconductor, Inc.
  10 *
  11 *  This file is subject to the terms and conditions of the GNU General Public
  12 *  License.  See the file COPYING in the main directory of this archive
  13 *  for more details.
  14 */
  15
  16#undef DEBUG
  17#undef DEBUG_IPI
  18#undef DEBUG_IRQ
  19#undef DEBUG_LOW
  20
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/init.h>
  24#include <linux/irq.h>
  25#include <linux/smp.h>
  26#include <linux/interrupt.h>
  27#include <linux/bootmem.h>
  28#include <linux/spinlock.h>
  29#include <linux/pci.h>
  30#include <linux/slab.h>
  31#include <linux/syscore_ops.h>
  32#include <linux/ratelimit.h>
  33
  34#include <asm/ptrace.h>
  35#include <asm/signal.h>
  36#include <asm/io.h>
  37#include <asm/pgtable.h>
  38#include <asm/irq.h>
  39#include <asm/machdep.h>
  40#include <asm/mpic.h>
  41#include <asm/smp.h>
  42
  43#include "mpic.h"
  44
  45#ifdef DEBUG
  46#define DBG(fmt...) printk(fmt)
  47#else
  48#define DBG(fmt...)
  49#endif
  50
  51static struct mpic *mpics;
  52static struct mpic *mpic_primary;
  53static DEFINE_RAW_SPINLOCK(mpic_lock);
  54
  55#ifdef CONFIG_PPC32     /* XXX for now */
  56#ifdef CONFIG_IRQ_ALL_CPUS
  57#define distribute_irqs (1)
  58#else
  59#define distribute_irqs (0)
  60#endif
  61#endif
  62
  63#ifdef CONFIG_MPIC_WEIRD
  64static u32 mpic_infos[][MPIC_IDX_END] = {
  65        [0] = { /* Original OpenPIC compatible MPIC */
  66                MPIC_GREG_BASE,
  67                MPIC_GREG_FEATURE_0,
  68                MPIC_GREG_GLOBAL_CONF_0,
  69                MPIC_GREG_VENDOR_ID,
  70                MPIC_GREG_IPI_VECTOR_PRI_0,
  71                MPIC_GREG_IPI_STRIDE,
  72                MPIC_GREG_SPURIOUS,
  73                MPIC_GREG_TIMER_FREQ,
  74
  75                MPIC_TIMER_BASE,
  76                MPIC_TIMER_STRIDE,
  77                MPIC_TIMER_CURRENT_CNT,
  78                MPIC_TIMER_BASE_CNT,
  79                MPIC_TIMER_VECTOR_PRI,
  80                MPIC_TIMER_DESTINATION,
  81
  82                MPIC_CPU_BASE,
  83                MPIC_CPU_STRIDE,
  84                MPIC_CPU_IPI_DISPATCH_0,
  85                MPIC_CPU_IPI_DISPATCH_STRIDE,
  86                MPIC_CPU_CURRENT_TASK_PRI,
  87                MPIC_CPU_WHOAMI,
  88                MPIC_CPU_INTACK,
  89                MPIC_CPU_EOI,
  90                MPIC_CPU_MCACK,
  91
  92                MPIC_IRQ_BASE,
  93                MPIC_IRQ_STRIDE,
  94                MPIC_IRQ_VECTOR_PRI,
  95                MPIC_VECPRI_VECTOR_MASK,
  96                MPIC_VECPRI_POLARITY_POSITIVE,
  97                MPIC_VECPRI_POLARITY_NEGATIVE,
  98                MPIC_VECPRI_SENSE_LEVEL,
  99                MPIC_VECPRI_SENSE_EDGE,
 100                MPIC_VECPRI_POLARITY_MASK,
 101                MPIC_VECPRI_SENSE_MASK,
 102                MPIC_IRQ_DESTINATION
 103        },
 104        [1] = { /* Tsi108/109 PIC */
 105                TSI108_GREG_BASE,
 106                TSI108_GREG_FEATURE_0,
 107                TSI108_GREG_GLOBAL_CONF_0,
 108                TSI108_GREG_VENDOR_ID,
 109                TSI108_GREG_IPI_VECTOR_PRI_0,
 110                TSI108_GREG_IPI_STRIDE,
 111                TSI108_GREG_SPURIOUS,
 112                TSI108_GREG_TIMER_FREQ,
 113
 114                TSI108_TIMER_BASE,
 115                TSI108_TIMER_STRIDE,
 116                TSI108_TIMER_CURRENT_CNT,
 117                TSI108_TIMER_BASE_CNT,
 118                TSI108_TIMER_VECTOR_PRI,
 119                TSI108_TIMER_DESTINATION,
 120
 121                TSI108_CPU_BASE,
 122                TSI108_CPU_STRIDE,
 123                TSI108_CPU_IPI_DISPATCH_0,
 124                TSI108_CPU_IPI_DISPATCH_STRIDE,
 125                TSI108_CPU_CURRENT_TASK_PRI,
 126                TSI108_CPU_WHOAMI,
 127                TSI108_CPU_INTACK,
 128                TSI108_CPU_EOI,
 129                TSI108_CPU_MCACK,
 130
 131                TSI108_IRQ_BASE,
 132                TSI108_IRQ_STRIDE,
 133                TSI108_IRQ_VECTOR_PRI,
 134                TSI108_VECPRI_VECTOR_MASK,
 135                TSI108_VECPRI_POLARITY_POSITIVE,
 136                TSI108_VECPRI_POLARITY_NEGATIVE,
 137                TSI108_VECPRI_SENSE_LEVEL,
 138                TSI108_VECPRI_SENSE_EDGE,
 139                TSI108_VECPRI_POLARITY_MASK,
 140                TSI108_VECPRI_SENSE_MASK,
 141                TSI108_IRQ_DESTINATION
 142        },
 143};
 144
 145#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]
 146
 147#else /* CONFIG_MPIC_WEIRD */
 148
 149#define MPIC_INFO(name) MPIC_##name
 150
 151#endif /* CONFIG_MPIC_WEIRD */
 152
 153static inline unsigned int mpic_processor_id(struct mpic *mpic)
 154{
 155        unsigned int cpu = 0;
 156
 157        if (!(mpic->flags & MPIC_SECONDARY))
 158                cpu = hard_smp_processor_id();
 159
 160        return cpu;
 161}
 162
 163/*
 164 * Register accessor functions
 165 */
 166
 167
 168static inline u32 _mpic_read(enum mpic_reg_type type,
 169                             struct mpic_reg_bank *rb,
 170                             unsigned int reg)
 171{
 172        switch(type) {
 173#ifdef CONFIG_PPC_DCR
 174        case mpic_access_dcr:
 175                return dcr_read(rb->dhost, reg);
 176#endif
 177        case mpic_access_mmio_be:
 178                return in_be32(rb->base + (reg >> 2));
 179        case mpic_access_mmio_le:
 180        default:
 181                return in_le32(rb->base + (reg >> 2));
 182        }
 183}
 184
 185static inline void _mpic_write(enum mpic_reg_type type,
 186                               struct mpic_reg_bank *rb,
 187                               unsigned int reg, u32 value)
 188{
 189        switch(type) {
 190#ifdef CONFIG_PPC_DCR
 191        case mpic_access_dcr:
 192                dcr_write(rb->dhost, reg, value);
 193                break;
 194#endif
 195        case mpic_access_mmio_be:
 196                out_be32(rb->base + (reg >> 2), value);
 197                break;
 198        case mpic_access_mmio_le:
 199        default:
 200                out_le32(rb->base + (reg >> 2), value);
 201                break;
 202        }
 203}
 204
 205static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
 206{
 207        enum mpic_reg_type type = mpic->reg_type;
 208        unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
 209                              (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 210
 211        if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
 212                type = mpic_access_mmio_be;
 213        return _mpic_read(type, &mpic->gregs, offset);
 214}
 215
 216static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
 217{
 218        unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
 219                              (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 220
 221        _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
 222}
 223
 224static inline unsigned int mpic_tm_offset(struct mpic *mpic, unsigned int tm)
 225{
 226        return (tm >> 2) * MPIC_TIMER_GROUP_STRIDE +
 227               (tm & 3) * MPIC_INFO(TIMER_STRIDE);
 228}
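     /* Worked example of the arithmetic above: timers sit in groups of four,
      * so timer 5 is group 1 (5 >> 2), slot 1 (5 & 3), i.e. an offset of
      * MPIC_TIMER_GROUP_STRIDE + MPIC_INFO(TIMER_STRIDE).
      */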
 229
 230static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
 231{
 232        unsigned int offset = mpic_tm_offset(mpic, tm) +
 233                              MPIC_INFO(TIMER_VECTOR_PRI);
 234
 235        return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
 236}
 237
 238static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
 239{
 240        unsigned int offset = mpic_tm_offset(mpic, tm) +
 241                              MPIC_INFO(TIMER_VECTOR_PRI);
 242
 243        _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
 244}
 245
 246static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
 247{
 248        unsigned int cpu = mpic_processor_id(mpic);
 249
 250        return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
 251}
 252
 253static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
 254{
 255        unsigned int cpu = mpic_processor_id(mpic);
 256
 257        _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
 258}
 259
 260static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
 261{
 262        unsigned int    isu = src_no >> mpic->isu_shift;
 263        unsigned int    idx = src_no & mpic->isu_mask;
 264        unsigned int    val;
 265
 266        val = _mpic_read(mpic->reg_type, &mpic->isus[isu],
 267                         reg + (idx * MPIC_INFO(IRQ_STRIDE)));
 268#ifdef CONFIG_MPIC_BROKEN_REGREAD
 269        if (reg == 0)
 270                val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) |
 271                        mpic->isu_reg0_shadow[src_no];
 272#endif
 273        return val;
 274}
 275
 276static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
 277                                   unsigned int reg, u32 value)
 278{
 279        unsigned int    isu = src_no >> mpic->isu_shift;
 280        unsigned int    idx = src_no & mpic->isu_mask;
 281
 282        _mpic_write(mpic->reg_type, &mpic->isus[isu],
 283                    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
 284
 285#ifdef CONFIG_MPIC_BROKEN_REGREAD
 286        if (reg == 0)
 287                mpic->isu_reg0_shadow[src_no] =
 288                        value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY);
 289#endif
 290}
 291
 292#define mpic_read(b,r)          _mpic_read(mpic->reg_type,&(b),(r))
 293#define mpic_write(b,r,v)       _mpic_write(mpic->reg_type,&(b),(r),(v))
 294#define mpic_ipi_read(i)        _mpic_ipi_read(mpic,(i))
 295#define mpic_ipi_write(i,v)     _mpic_ipi_write(mpic,(i),(v))
 296#define mpic_tm_read(i)         _mpic_tm_read(mpic,(i))
 297#define mpic_tm_write(i,v)      _mpic_tm_write(mpic,(i),(v))
 298#define mpic_cpu_read(i)        _mpic_cpu_read(mpic,(i))
 299#define mpic_cpu_write(i,v)     _mpic_cpu_write(mpic,(i),(v))
 300#define mpic_irq_read(s,r)      _mpic_irq_read(mpic,(s),(r))
 301#define mpic_irq_write(s,r,v)   _mpic_irq_write(mpic,(s),(r),(v))
 302
 303
 304/*
 305 * Low level utility functions
 306 */
 307
 308
 309static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
 310                           struct mpic_reg_bank *rb, unsigned int offset,
 311                           unsigned int size)
 312{
 313        rb->base = ioremap(phys_addr + offset, size);
 314        BUG_ON(rb->base == NULL);
 315}
 316
 317#ifdef CONFIG_PPC_DCR
 318static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
 319                          unsigned int offset, unsigned int size)
 320{
 321        phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
 322        rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
 323        BUG_ON(!DCR_MAP_OK(rb->dhost));
 324}
 325
 326static inline void mpic_map(struct mpic *mpic,
 327                            phys_addr_t phys_addr, struct mpic_reg_bank *rb,
 328                            unsigned int offset, unsigned int size)
 329{
 330        if (mpic->flags & MPIC_USES_DCR)
 331                _mpic_map_dcr(mpic, rb, offset, size);
 332        else
 333                _mpic_map_mmio(mpic, phys_addr, rb, offset, size);
 334}
 335#else /* CONFIG_PPC_DCR */
 336#define mpic_map(m,p,b,o,s)     _mpic_map_mmio(m,p,b,o,s)
 337#endif /* !CONFIG_PPC_DCR */
 338
 339
 340
 341/* Check if we have one of those nice broken MPICs with a flipped endian on
 342 * reads from IPI registers
 343 */
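     /* If IPI register reads come back with reversed endianness, the value
      * read below is the byte-swapped mask; on the big-endian kernels these
      * controllers are used with, that is exactly what le32_to_cpu(MPIC_VECPRI_MASK)
      * evaluates to, which is what the comparison checks for.
      */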
 344static void __init mpic_test_broken_ipi(struct mpic *mpic)
 345{
 346        u32 r;
 347
 348        mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
 349        r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
 350
 351        if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
 352                printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
 353                mpic->flags |= MPIC_BROKEN_IPI;
 354        }
 355}
 356
 357#ifdef CONFIG_MPIC_U3_HT_IRQS
 358
 359/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 360 * to force the edge setting on the MPIC and do the ack workaround.
 361 */
 362static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
 363{
 364        if (source >= 128 || !mpic->fixups)
 365                return 0;
 366        return mpic->fixups[source].base != NULL;
 367}
 368
 369
 370static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
 371{
 372        struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 373
 374        if (fixup->applebase) {
 375                unsigned int soff = (fixup->index >> 3) & ~3;
 376                unsigned int mask = 1U << (fixup->index & 0x1f);
 377                writel(mask, fixup->applebase + soff);
 378        } else {
 379                raw_spin_lock(&mpic->fixup_lock);
 380                writeb(0x11 + 2 * fixup->index, fixup->base + 2);
 381                writel(fixup->data, fixup->base + 4);
 382                raw_spin_unlock(&mpic->fixup_lock);
 383        }
 384}
 385
 386static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
 387                                      bool level)
 388{
 389        struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 390        unsigned long flags;
 391        u32 tmp;
 392
 393        if (fixup->base == NULL)
 394                return;
 395
 396        DBG("startup_ht_interrupt(0x%x) index: %d\n",
 397            source, fixup->index);
 398        raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
 399        /* Enable and configure */
 400        writeb(0x10 + 2 * fixup->index, fixup->base + 2);
 401        tmp = readl(fixup->base + 4);
 402        tmp &= ~(0x23U);
 403        if (level)
 404                tmp |= 0x22;
 405        writel(tmp, fixup->base + 4);
 406        raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
 407
 408#ifdef CONFIG_PM
  409        /* The lowest bit is stored inverted relative to the actual HW:
  410         * set if this fixup was enabled, clear otherwise */
 411        mpic->save_data[source].fixup_data = tmp | 1;
 412#endif
 413}
 414
 415static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
 416{
 417        struct mpic_irq_fixup *fixup = &mpic->fixups[source];
 418        unsigned long flags;
 419        u32 tmp;
 420
 421        if (fixup->base == NULL)
 422                return;
 423
 424        DBG("shutdown_ht_interrupt(0x%x)\n", source);
 425
 426        /* Disable */
 427        raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
 428        writeb(0x10 + 2 * fixup->index, fixup->base + 2);
 429        tmp = readl(fixup->base + 4);
 430        tmp |= 1;
 431        writel(tmp, fixup->base + 4);
 432        raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
 433
 434#ifdef CONFIG_PM
  435        /* The lowest bit is stored inverted relative to the actual HW:
  436         * set if this fixup was enabled, clear otherwise */
 437        mpic->save_data[source].fixup_data = tmp & ~1;
 438#endif
 439}
 440
 441#ifdef CONFIG_PCI_MSI
 442static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
 443                                    unsigned int devfn)
 444{
 445        u8 __iomem *base;
 446        u8 pos, flags;
 447        u64 addr = 0;
 448
 449        for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
 450             pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
 451                u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
 452                if (id == PCI_CAP_ID_HT) {
 453                        id = readb(devbase + pos + 3);
 454                        if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING)
 455                                break;
 456                }
 457        }
 458
 459        if (pos == 0)
 460                return;
 461
 462        base = devbase + pos;
 463
 464        flags = readb(base + HT_MSI_FLAGS);
 465        if (!(flags & HT_MSI_FLAGS_FIXED)) {
 466                addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK;
 467                addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
 468        }
 469
 470        printk(KERN_DEBUG "mpic:   - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
 471                PCI_SLOT(devfn), PCI_FUNC(devfn),
 472                flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);
 473
 474        if (!(flags & HT_MSI_FLAGS_ENABLE))
 475                writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
 476}
 477#else
 478static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
 479                                    unsigned int devfn)
 480{
 481        return;
 482}
 483#endif
 484
 485static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
 486                                    unsigned int devfn, u32 vdid)
 487{
 488        int i, irq, n;
 489        u8 __iomem *base;
 490        u32 tmp;
 491        u8 pos;
 492
 493        for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
 494             pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
 495                u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
 496                if (id == PCI_CAP_ID_HT) {
 497                        id = readb(devbase + pos + 3);
 498                        if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ)
 499                                break;
 500                }
 501        }
 502        if (pos == 0)
 503                return;
 504
 505        base = devbase + pos;
 506        writeb(0x01, base + 2);
 507        n = (readl(base + 4) >> 16) & 0xff;
 508
 509        printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
 510               " has %d irqs\n",
 511               devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
 512
 513        for (i = 0; i <= n; i++) {
 514                writeb(0x10 + 2 * i, base + 2);
 515                tmp = readl(base + 4);
 516                irq = (tmp >> 16) & 0xff;
 517                DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
  518                /* mask it, will be unmasked later */
 519                tmp |= 0x1;
 520                writel(tmp, base + 4);
 521                mpic->fixups[irq].index = i;
 522                mpic->fixups[irq].base = base;
 523                /* Apple HT PIC has a non-standard way of doing EOIs */
 524                if ((vdid & 0xffff) == 0x106b)
 525                        mpic->fixups[irq].applebase = devbase + 0x60;
 526                else
 527                        mpic->fixups[irq].applebase = NULL;
 528                writeb(0x11 + 2 * i, base + 2);
 529                mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
 530        }
 531}
 532 
 533
 534static void __init mpic_scan_ht_pics(struct mpic *mpic)
 535{
 536        unsigned int devfn;
 537        u8 __iomem *cfgspace;
 538
 539        printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
 540
 541        /* Allocate fixups array */
 542        mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
 543        BUG_ON(mpic->fixups == NULL);
 544
 545        /* Init spinlock */
 546        raw_spin_lock_init(&mpic->fixup_lock);
 547
 548        /* Map U3 config space. We assume all IO-APICs are on the primary bus
 549         * so we only need to map 64kB.
 550         */
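             /* 0x10000 covers 256 devfns x 256 bytes of config header each,
              * matching the devbase = cfgspace + (devfn << 8) addressing below.
              */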
 551        cfgspace = ioremap(0xf2000000, 0x10000);
 552        BUG_ON(cfgspace == NULL);
 553
 554        /* Now we scan all slots. We do a very quick scan, we read the header
 555         * type, vendor ID and device ID only, that's plenty enough
 556         */
 557        for (devfn = 0; devfn < 0x100; devfn++) {
 558                u8 __iomem *devbase = cfgspace + (devfn << 8);
 559                u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
 560                u32 l = readl(devbase + PCI_VENDOR_ID);
 561                u16 s;
 562
 563                DBG("devfn %x, l: %x\n", devfn, l);
 564
 565                /* If no device, skip */
 566                if (l == 0xffffffff || l == 0x00000000 ||
 567                    l == 0x0000ffff || l == 0xffff0000)
 568                        goto next;
  569                /* Check if it supports capability lists */
 570                s = readw(devbase + PCI_STATUS);
 571                if (!(s & PCI_STATUS_CAP_LIST))
 572                        goto next;
 573
 574                mpic_scan_ht_pic(mpic, devbase, devfn, l);
 575                mpic_scan_ht_msi(mpic, devbase, devfn);
 576
 577        next:
 578                /* next device, if function 0 */
 579                if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
 580                        devfn += 7;
 581        }
 582}
 583
 584#else /* CONFIG_MPIC_U3_HT_IRQS */
 585
 586static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
 587{
 588        return 0;
 589}
 590
 591static void __init mpic_scan_ht_pics(struct mpic *mpic)
 592{
 593}
 594
 595#endif /* CONFIG_MPIC_U3_HT_IRQS */
 596
 597/* Find an mpic associated with a given linux interrupt */
 598static struct mpic *mpic_find(unsigned int irq)
 599{
 600        if (irq < NUM_ISA_INTERRUPTS)
 601                return NULL;
 602
 603        return irq_get_chip_data(irq);
 604}
 605
 606/* Determine if the linux irq is an IPI */
 607static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
 608{
 609        return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
 610}
 611
 612/* Determine if the linux irq is a timer */
 613static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
 614{
 615        return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
 616}
 617
 618/* Convert a cpu mask from logical to physical cpu numbers. */
 619static inline u32 mpic_physmask(u32 cpumask)
 620{
 621        int i;
 622        u32 mask = 0;
 623
 624        for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
 625                mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
 626        return mask;
 627}
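     /* Illustrative example (hypothetical hard IDs): if logical CPUs 0 and 1
      * have hard IDs 0 and 2, a logical mask of 0x3 becomes a physical mask
      * of 0x5.
      */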
 628
 629#ifdef CONFIG_SMP
 630/* Get the mpic structure from the IPI number */
 631static inline struct mpic * mpic_from_ipi(struct irq_data *d)
 632{
 633        return irq_data_get_irq_chip_data(d);
 634}
 635#endif
 636
 637/* Get the mpic structure from the irq number */
 638static inline struct mpic * mpic_from_irq(unsigned int irq)
 639{
 640        return irq_get_chip_data(irq);
 641}
 642
 643/* Get the mpic structure from the irq data */
 644static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
 645{
 646        return irq_data_get_irq_chip_data(d);
 647}
 648
 649/* Send an EOI */
 650static inline void mpic_eoi(struct mpic *mpic)
 651{
 652        mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
 653        (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
 654}
 655
 656/*
 657 * Linux descriptor level callbacks
 658 */
 659
 660
 661void mpic_unmask_irq(struct irq_data *d)
 662{
 663        unsigned int loops = 100000;
 664        struct mpic *mpic = mpic_from_irq_data(d);
 665        unsigned int src = irqd_to_hwirq(d);
 666
 667        DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
 668
 669        mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
 670                       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
 671                       ~MPIC_VECPRI_MASK);
  672        /* make sure the unmask reaches the controller before we return */
 673        do {
 674                if (!loops--) {
 675                        printk(KERN_ERR "%s: timeout on hwirq %u\n",
 676                               __func__, src);
 677                        break;
 678                }
 679        } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
 680}
 681
 682void mpic_mask_irq(struct irq_data *d)
 683{
 684        unsigned int loops = 100000;
 685        struct mpic *mpic = mpic_from_irq_data(d);
 686        unsigned int src = irqd_to_hwirq(d);
 687
 688        DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
 689
 690        mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
 691                       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
 692                       MPIC_VECPRI_MASK);
 693
  694        /* make sure the mask reaches the controller before we return */
 695        do {
 696                if (!loops--) {
 697                        printk(KERN_ERR "%s: timeout on hwirq %u\n",
 698                               __func__, src);
 699                        break;
 700                }
 701        } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
 702}
 703
 704void mpic_end_irq(struct irq_data *d)
 705{
 706        struct mpic *mpic = mpic_from_irq_data(d);
 707
 708#ifdef DEBUG_IRQ
 709        DBG("%s: end_irq: %d\n", mpic->name, d->irq);
 710#endif
 711        /* We always EOI on end_irq() even for edge interrupts since that
  712         * should only lower the priority; the MPIC should have properly
 713         * latched another edge interrupt coming in anyway
 714         */
 715
 716        mpic_eoi(mpic);
 717}
 718
 719#ifdef CONFIG_MPIC_U3_HT_IRQS
 720
 721static void mpic_unmask_ht_irq(struct irq_data *d)
 722{
 723        struct mpic *mpic = mpic_from_irq_data(d);
 724        unsigned int src = irqd_to_hwirq(d);
 725
 726        mpic_unmask_irq(d);
 727
 728        if (irqd_is_level_type(d))
 729                mpic_ht_end_irq(mpic, src);
 730}
 731
 732static unsigned int mpic_startup_ht_irq(struct irq_data *d)
 733{
 734        struct mpic *mpic = mpic_from_irq_data(d);
 735        unsigned int src = irqd_to_hwirq(d);
 736
 737        mpic_unmask_irq(d);
 738        mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
 739
 740        return 0;
 741}
 742
 743static void mpic_shutdown_ht_irq(struct irq_data *d)
 744{
 745        struct mpic *mpic = mpic_from_irq_data(d);
 746        unsigned int src = irqd_to_hwirq(d);
 747
 748        mpic_shutdown_ht_interrupt(mpic, src);
 749        mpic_mask_irq(d);
 750}
 751
 752static void mpic_end_ht_irq(struct irq_data *d)
 753{
 754        struct mpic *mpic = mpic_from_irq_data(d);
 755        unsigned int src = irqd_to_hwirq(d);
 756
 757#ifdef DEBUG_IRQ
 758        DBG("%s: end_irq: %d\n", mpic->name, d->irq);
 759#endif
 760        /* We always EOI on end_irq() even for edge interrupts since that
  761         * should only lower the priority; the MPIC should have properly
 762         * latched another edge interrupt coming in anyway
 763         */
 764
 765        if (irqd_is_level_type(d))
 766                mpic_ht_end_irq(mpic, src);
 767        mpic_eoi(mpic);
 768}
  769#endif /* CONFIG_MPIC_U3_HT_IRQS */
 770
 771#ifdef CONFIG_SMP
 772
 773static void mpic_unmask_ipi(struct irq_data *d)
 774{
 775        struct mpic *mpic = mpic_from_ipi(d);
 776        unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];
 777
 778        DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
 779        mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
 780}
 781
 782static void mpic_mask_ipi(struct irq_data *d)
 783{
 784        /* NEVER disable an IPI... that's just plain wrong! */
 785}
 786
 787static void mpic_end_ipi(struct irq_data *d)
 788{
 789        struct mpic *mpic = mpic_from_ipi(d);
 790
 791        /*
 792         * IPIs are marked IRQ_PER_CPU. This has the side effect of
 793         * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
 794         * applying to them. We EOI them late to avoid re-entering.
 795         */
 796        mpic_eoi(mpic);
 797}
 798
 799#endif /* CONFIG_SMP */
 800
 801static void mpic_unmask_tm(struct irq_data *d)
 802{
 803        struct mpic *mpic = mpic_from_irq_data(d);
 804        unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
 805
 806        DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
 807        mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
 808        mpic_tm_read(src);
 809}
 810
 811static void mpic_mask_tm(struct irq_data *d)
 812{
 813        struct mpic *mpic = mpic_from_irq_data(d);
 814        unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
 815
 816        mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
 817        mpic_tm_read(src);
 818}
 819
 820int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 821                      bool force)
 822{
 823        struct mpic *mpic = mpic_from_irq_data(d);
 824        unsigned int src = irqd_to_hwirq(d);
 825
 826        if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
 827                int cpuid = irq_choose_cpu(cpumask);
 828
 829                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
 830        } else {
 831                u32 mask = cpumask_bits(cpumask)[0];
 832
 833                mask &= cpumask_bits(cpu_online_mask)[0];
 834
 835                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
 836                               mpic_physmask(mask));
 837        }
 838
 839        return IRQ_SET_MASK_OK;
 840}
 841
 842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
 843{
 844        /* Now convert sense value */
 845        switch(type & IRQ_TYPE_SENSE_MASK) {
 846        case IRQ_TYPE_EDGE_RISING:
 847                return MPIC_INFO(VECPRI_SENSE_EDGE) |
 848                       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
 849        case IRQ_TYPE_EDGE_FALLING:
 850        case IRQ_TYPE_EDGE_BOTH:
 851                return MPIC_INFO(VECPRI_SENSE_EDGE) |
 852                       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
 853        case IRQ_TYPE_LEVEL_HIGH:
 854                return MPIC_INFO(VECPRI_SENSE_LEVEL) |
 855                       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
 856        case IRQ_TYPE_LEVEL_LOW:
 857        default:
 858                return MPIC_INFO(VECPRI_SENSE_LEVEL) |
 859                       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
 860        }
 861}
 862
 863int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 864{
 865        struct mpic *mpic = mpic_from_irq_data(d);
 866        unsigned int src = irqd_to_hwirq(d);
 867        unsigned int vecpri, vold, vnew;
 868
 869        DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
 870            mpic, d->irq, src, flow_type);
 871
 872        if (src >= mpic->num_sources)
 873                return -EINVAL;
 874
 875        vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
 876
 877        /* We don't support "none" type */
 878        if (flow_type == IRQ_TYPE_NONE)
 879                flow_type = IRQ_TYPE_DEFAULT;
 880
 881        /* Default: read HW settings */
 882        if (flow_type == IRQ_TYPE_DEFAULT) {
 883                switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
 884                               MPIC_INFO(VECPRI_SENSE_MASK))) {
 885                        case MPIC_INFO(VECPRI_SENSE_EDGE) |
 886                             MPIC_INFO(VECPRI_POLARITY_POSITIVE):
 887                                flow_type = IRQ_TYPE_EDGE_RISING;
 888                                break;
 889                        case MPIC_INFO(VECPRI_SENSE_EDGE) |
 890                             MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
 891                                flow_type = IRQ_TYPE_EDGE_FALLING;
 892                                break;
 893                        case MPIC_INFO(VECPRI_SENSE_LEVEL) |
 894                             MPIC_INFO(VECPRI_POLARITY_POSITIVE):
 895                                flow_type = IRQ_TYPE_LEVEL_HIGH;
 896                                break;
 897                        case MPIC_INFO(VECPRI_SENSE_LEVEL) |
 898                             MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
 899                                flow_type = IRQ_TYPE_LEVEL_LOW;
 900                                break;
 901                }
 902        }
 903
 904        /* Apply to irq desc */
 905        irqd_set_trigger_type(d, flow_type);
 906
 907        /* Apply to HW */
 908        if (mpic_is_ht_interrupt(mpic, src))
 909                vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
 910                        MPIC_VECPRI_SENSE_EDGE;
 911        else
 912                vecpri = mpic_type_to_vecpri(mpic, flow_type);
 913
 914        vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
 915                        MPIC_INFO(VECPRI_SENSE_MASK));
 916        vnew |= vecpri;
 917        if (vold != vnew)
 918                mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
 919
 920        return IRQ_SET_MASK_OK_NOCOPY;
 921}
 922
 923void mpic_set_vector(unsigned int virq, unsigned int vector)
 924{
 925        struct mpic *mpic = mpic_from_irq(virq);
 926        unsigned int src = virq_to_hw(virq);
 927        unsigned int vecpri;
 928
 929        DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
 930            mpic, virq, src, vector);
 931
 932        if (src >= mpic->num_sources)
 933                return;
 934
 935        vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
 936        vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK);
 937        vecpri |= vector;
 938        mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
 939}
 940
 941void mpic_set_destination(unsigned int virq, unsigned int cpuid)
 942{
 943        struct mpic *mpic = mpic_from_irq(virq);
 944        unsigned int src = virq_to_hw(virq);
 945
 946        DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
 947            mpic, virq, src, cpuid);
 948
 949        if (src >= mpic->num_sources)
 950                return;
 951
 952        mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
 953}
 954
 955static struct irq_chip mpic_irq_chip = {
 956        .irq_mask       = mpic_mask_irq,
 957        .irq_unmask     = mpic_unmask_irq,
 958        .irq_eoi        = mpic_end_irq,
 959        .irq_set_type   = mpic_set_irq_type,
 960};
 961
 962#ifdef CONFIG_SMP
 963static struct irq_chip mpic_ipi_chip = {
 964        .irq_mask       = mpic_mask_ipi,
 965        .irq_unmask     = mpic_unmask_ipi,
 966        .irq_eoi        = mpic_end_ipi,
 967};
 968#endif /* CONFIG_SMP */
 969
 970static struct irq_chip mpic_tm_chip = {
 971        .irq_mask       = mpic_mask_tm,
 972        .irq_unmask     = mpic_unmask_tm,
 973        .irq_eoi        = mpic_end_irq,
 974};
 975
 976#ifdef CONFIG_MPIC_U3_HT_IRQS
 977static struct irq_chip mpic_irq_ht_chip = {
 978        .irq_startup    = mpic_startup_ht_irq,
 979        .irq_shutdown   = mpic_shutdown_ht_irq,
 980        .irq_mask       = mpic_mask_irq,
 981        .irq_unmask     = mpic_unmask_ht_irq,
 982        .irq_eoi        = mpic_end_ht_irq,
 983        .irq_set_type   = mpic_set_irq_type,
 984};
 985#endif /* CONFIG_MPIC_U3_HT_IRQS */
 986
 987
 988static int mpic_host_match(struct irq_domain *h, struct device_node *node)
 989{
 990        /* Exact match, unless mpic node is NULL */
 991        return h->of_node == NULL || h->of_node == node;
 992}
 993
 994static int mpic_host_map(struct irq_domain *h, unsigned int virq,
 995                         irq_hw_number_t hw)
 996{
 997        struct mpic *mpic = h->host_data;
 998        struct irq_chip *chip;
 999
1000        DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);
1001
1002        if (hw == mpic->spurious_vec)
1003                return -EINVAL;
1004        if (mpic->protected && test_bit(hw, mpic->protected)) {
1005                pr_warning("mpic: Mapping of source 0x%x failed, "
 1006                           "source protected by firmware!\n",
1007                           (unsigned int)hw);
1008                return -EPERM;
1009        }
1010
1011#ifdef CONFIG_SMP
1012        else if (hw >= mpic->ipi_vecs[0]) {
1013                WARN_ON(mpic->flags & MPIC_SECONDARY);
1014
1015                DBG("mpic: mapping as IPI\n");
1016                irq_set_chip_data(virq, mpic);
1017                irq_set_chip_and_handler(virq, &mpic->hc_ipi,
1018                                         handle_percpu_irq);
1019                return 0;
1020        }
1021#endif /* CONFIG_SMP */
1022
1023        if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
1024                WARN_ON(mpic->flags & MPIC_SECONDARY);
1025
1026                DBG("mpic: mapping as timer\n");
1027                irq_set_chip_data(virq, mpic);
1028                irq_set_chip_and_handler(virq, &mpic->hc_tm,
1029                                         handle_fasteoi_irq);
1030                return 0;
1031        }
1032
1033        if (mpic_map_error_int(mpic, virq, hw))
1034                return 0;
1035
1036        if (hw >= mpic->num_sources) {
1037                pr_warning("mpic: Mapping of source 0x%x failed, "
 1038                           "source out of range!\n",
1039                           (unsigned int)hw);
1040                return -EINVAL;
1041        }
1042
1043        mpic_msi_reserve_hwirq(mpic, hw);
1044
1045        /* Default chip */
1046        chip = &mpic->hc_irq;
1047
1048#ifdef CONFIG_MPIC_U3_HT_IRQS
1049        /* Check for HT interrupts, override vecpri */
1050        if (mpic_is_ht_interrupt(mpic, hw))
1051                chip = &mpic->hc_ht_irq;
1052#endif /* CONFIG_MPIC_U3_HT_IRQS */
1053
1054        DBG("mpic: mapping to irq chip @%p\n", chip);
1055
1056        irq_set_chip_data(virq, mpic);
1057        irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
1058
1059        /* Set default irq type */
1060        irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
1061
1062        /* If the MPIC was reset, then all vectors have already been
1063         * initialized.  Otherwise, a per source lazy initialization
1064         * is done here.
1065         */
1066        if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
1067                mpic_set_vector(virq, hw);
1068                mpic_set_destination(virq, mpic_processor_id(mpic));
1069                mpic_irq_set_priority(virq, 8);
1070        }
1071
1072        return 0;
1073}
1074
1075static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
1076                           const u32 *intspec, unsigned int intsize,
1077                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1078
1079{
1080        struct mpic *mpic = h->host_data;
1081        static unsigned char map_mpic_senses[4] = {
1082                IRQ_TYPE_EDGE_RISING,
1083                IRQ_TYPE_LEVEL_LOW,
1084                IRQ_TYPE_LEVEL_HIGH,
1085                IRQ_TYPE_EDGE_FALLING,
1086        };
1087
1088        *out_hwirq = intspec[0];
1089        if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
1090                /*
1091                 * Freescale MPIC with extended intspec:
1092                 * First two cells are as usual.  Third specifies
1093                 * an "interrupt type".  Fourth is type-specific data.
1094                 *
1095                 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
1096                 */
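                     /* Illustrative specifiers (hypothetical values, derived
                      * from the cases below): <16 2 0 0> is ordinary source 16
                      * with sense value 2 (IRQ_TYPE_LEVEL_HIGH); <2 0 3 0>
                      * selects timer 2 via timer_vecs[]; a third cell of 1
                      * maps the fourth cell through err_int_vecs[] when
                      * MPIC_FSL_HAS_EIMR is set.
                      */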
1097                switch (intspec[2]) {
1098                case 0:
1099                        break;
1100                case 1:
1101                        if (!(mpic->flags & MPIC_FSL_HAS_EIMR))
1102                                break;
1103
1104                        if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs))
1105                                return -EINVAL;
1106
1107                        *out_hwirq = mpic->err_int_vecs[intspec[3]];
1108
1109                        break;
1110                case 2:
1111                        if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
1112                                return -EINVAL;
1113
1114                        *out_hwirq = mpic->ipi_vecs[intspec[0]];
1115                        break;
1116                case 3:
1117                        if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
1118                                return -EINVAL;
1119
1120                        *out_hwirq = mpic->timer_vecs[intspec[0]];
1121                        break;
1122                default:
1123                        pr_debug("%s: unknown irq type %u\n",
1124                                 __func__, intspec[2]);
1125                        return -EINVAL;
1126                }
1127
1128                *out_flags = map_mpic_senses[intspec[1] & 3];
1129        } else if (intsize > 1) {
1130                u32 mask = 0x3;
1131
1132                /* Apple invented a new race of encoding on machines with
1133                 * an HT APIC. They encode, among others, the index within
1134                 * the HT APIC. We don't care about it here since thankfully,
1135                 * it appears that they have the APIC already properly
1136                 * configured, and thus our current fixup code that reads the
1137                 * APIC config works fine. However, we still need to mask out
1138                 * bits in the specifier to make sure we only get bit 0 which
1139                 * is the level/edge bit (the only sense bit exposed by Apple),
1140                 * as their bit 1 means something else.
1141                 */
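                     /* With mask = 0x1 only bit 0 of the sense cell is used on
                      * powermac: 0 -> IRQ_TYPE_EDGE_RISING,
                      * 1 -> IRQ_TYPE_LEVEL_LOW (per map_mpic_senses above).
                      */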
1142                if (machine_is(powermac))
1143                        mask = 0x1;
1144                *out_flags = map_mpic_senses[intspec[1] & mask];
1145        } else
1146                *out_flags = IRQ_TYPE_NONE;
1147
1148        DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
1149            intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);
1150
1151        return 0;
1152}
1153
1154/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
1155static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
1156{
1157        struct irq_chip *chip = irq_desc_get_chip(desc);
1158        struct mpic *mpic = irq_desc_get_handler_data(desc);
1159        unsigned int virq;
1160
1161        BUG_ON(!(mpic->flags & MPIC_SECONDARY));
1162
1163        virq = mpic_get_one_irq(mpic);
1164        if (virq)
1165                generic_handle_irq(virq);
1166
1167        chip->irq_eoi(&desc->irq_data);
1168}
1169
1170static struct irq_domain_ops mpic_host_ops = {
1171        .match = mpic_host_match,
1172        .map = mpic_host_map,
1173        .xlate = mpic_host_xlate,
1174};
1175
1176/*
1177 * Exported functions
1178 */
1179
1180struct mpic * __init mpic_alloc(struct device_node *node,
1181                                phys_addr_t phys_addr,
1182                                unsigned int flags,
1183                                unsigned int isu_size,
1184                                unsigned int irq_count,
1185                                const char *name)
1186{
1187        int i, psize, intvec_top;
1188        struct mpic *mpic;
1189        u32 greg_feature;
1190        const char *vers;
1191        const u32 *psrc;
1192        u32 last_irq;
1193        u32 fsl_version = 0;
1194
1195        /* Default MPIC search parameters */
1196        static const struct of_device_id __initconst mpic_device_id[] = {
1197                { .type       = "open-pic", },
1198                { .compatible = "open-pic", },
1199                {},
1200        };
1201
1202        /*
1203         * If we were not passed a device-tree node, then perform the default
 1204         * search for a standardized OpenPIC.
1205         */
1206        if (node) {
1207                node = of_node_get(node);
1208        } else {
1209                node = of_find_matching_node(NULL, mpic_device_id);
1210                if (!node)
1211                        return NULL;
1212        }
1213
1214        /* Pick the physical address from the device tree if unspecified */
1215        if (!phys_addr) {
1216                /* Check if it is DCR-based */
1217                if (of_get_property(node, "dcr-reg", NULL)) {
1218                        flags |= MPIC_USES_DCR;
1219                } else {
1220                        struct resource r;
1221                        if (of_address_to_resource(node, 0, &r))
1222                                goto err_of_node_put;
1223                        phys_addr = r.start;
1224                }
1225        }
1226
1227        /* Read extra device-tree properties into the flags variable */
1228        if (of_get_property(node, "big-endian", NULL))
1229                flags |= MPIC_BIG_ENDIAN;
1230        if (of_get_property(node, "pic-no-reset", NULL))
1231                flags |= MPIC_NO_RESET;
1232        if (of_get_property(node, "single-cpu-affinity", NULL))
1233                flags |= MPIC_SINGLE_DEST_CPU;
1234        if (of_device_is_compatible(node, "fsl,mpic"))
1235                flags |= MPIC_FSL | MPIC_LARGE_VECTORS;
1236
1237        mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
1238        if (mpic == NULL)
1239                goto err_of_node_put;
1240
1241        mpic->name = name;
1242        mpic->node = node;
1243        mpic->paddr = phys_addr;
1244        mpic->flags = flags;
1245
1246        mpic->hc_irq = mpic_irq_chip;
1247        mpic->hc_irq.name = name;
1248        if (!(mpic->flags & MPIC_SECONDARY))
1249                mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
1250#ifdef CONFIG_MPIC_U3_HT_IRQS
1251        mpic->hc_ht_irq = mpic_irq_ht_chip;
1252        mpic->hc_ht_irq.name = name;
1253        if (!(mpic->flags & MPIC_SECONDARY))
1254                mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
1255#endif /* CONFIG_MPIC_U3_HT_IRQS */
1256
1257#ifdef CONFIG_SMP
1258        mpic->hc_ipi = mpic_ipi_chip;
1259        mpic->hc_ipi.name = name;
1260#endif /* CONFIG_SMP */
1261
1262        mpic->hc_tm = mpic_tm_chip;
1263        mpic->hc_tm.name = name;
1264
1265        mpic->num_sources = 0; /* so far */
1266
1267        if (mpic->flags & MPIC_LARGE_VECTORS)
1268                intvec_top = 2047;
1269        else
1270                intvec_top = 255;
1271
1272        mpic->timer_vecs[0] = intvec_top - 12;
1273        mpic->timer_vecs[1] = intvec_top - 11;
1274        mpic->timer_vecs[2] = intvec_top - 10;
1275        mpic->timer_vecs[3] = intvec_top - 9;
1276        mpic->timer_vecs[4] = intvec_top - 8;
1277        mpic->timer_vecs[5] = intvec_top - 7;
1278        mpic->timer_vecs[6] = intvec_top - 6;
1279        mpic->timer_vecs[7] = intvec_top - 5;
1280        mpic->ipi_vecs[0]   = intvec_top - 4;
1281        mpic->ipi_vecs[1]   = intvec_top - 3;
1282        mpic->ipi_vecs[2]   = intvec_top - 2;
1283        mpic->ipi_vecs[3]   = intvec_top - 1;
1284        mpic->spurious_vec  = intvec_top;
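             /* E.g. for a standard OpenPIC (intvec_top = 255) this reserves
              * 243-250 for timers, 251-254 for IPIs and 255 as the spurious
              * vector; with MPIC_LARGE_VECTORS the block sits at 2035-2047.
              */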
1285
1286        /* Look for protected sources */
1287        psrc = of_get_property(mpic->node, "protected-sources", &psize);
1288        if (psrc) {
1289                /* Allocate a bitmap with one bit per interrupt */
1290                unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
1291                mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
1292                BUG_ON(mpic->protected == NULL);
1293                for (i = 0; i < psize/sizeof(u32); i++) {
1294                        if (psrc[i] > intvec_top)
1295                                continue;
1296                        __set_bit(psrc[i], mpic->protected);
1297                }
1298        }
1299
1300#ifdef CONFIG_MPIC_WEIRD
1301        mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)];
1302#endif
1303
1304        /* default register type */
1305        if (mpic->flags & MPIC_BIG_ENDIAN)
1306                mpic->reg_type = mpic_access_mmio_be;
1307        else
1308                mpic->reg_type = mpic_access_mmio_le;
1309
1310        /*
1311         * An MPIC with a "dcr-reg" property must be accessed that way, but
1312         * only if the kernel includes DCR support.
1313         */
1314#ifdef CONFIG_PPC_DCR
1315        if (mpic->flags & MPIC_USES_DCR)
1316                mpic->reg_type = mpic_access_dcr;
1317#else
1318        BUG_ON(mpic->flags & MPIC_USES_DCR);
1319#endif
1320
1321        /* Map the global registers */
1322        mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
1323        mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
1324
1325        if (mpic->flags & MPIC_FSL) {
1326                u32 brr1;
1327                int ret;
1328
1329                /*
1330                 * Yes, Freescale really did put global registers in the
1331                 * magic per-cpu area -- and they don't even show up in the
1332                 * non-magic per-cpu copies that this driver normally uses.
1333                 */
1334                mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs,
1335                         MPIC_CPU_THISBASE, 0x1000);
1336
1337                brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1338                                MPIC_FSL_BRR1);
1339                fsl_version = brr1 & MPIC_FSL_BRR1_VER;
1340
1341                /* Error interrupt mask register (EIMR) is required for
1342                 * handling individual device error interrupts. EIMR
1343                 * was added in MPIC version 4.1.
1344                 *
1345                 * Over here we reserve vector number space for error
1346                 * interrupt vectors. This space is stolen from the
1347                 * global vector number space, as in case of ipis
1348                 * and timer interrupts.
1349                 *
1350                 * Available vector space = intvec_top - 12, where 12
1351                 * is the number of vectors which have been consumed by
1352                 * ipis and timer interrupts.
1353                 */
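                     /* E.g. with MPIC_LARGE_VECTORS (intvec_top = 2047) the
                      * value passed below is 2035, the first vector of the
                      * timer/IPI/spurious block reserved above.
                      */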
1354                if (fsl_version >= 0x401) {
1355                        ret = mpic_setup_error_int(mpic, intvec_top - 12);
1356                        if (ret)
1357                                return NULL;
1358                }
1359
1360        }
1361
1362        /*
1363         * EPR is only available starting with v4.0.  To support
1364         * platforms that don't know the MPIC version at compile-time,
1365         * such as qemu-e500, turn off coreint if this MPIC doesn't
1366         * support it.  Note that we never enable it if it wasn't
1367         * requested in the first place.
1368         *
1369         * This is done outside the MPIC_FSL check, so that we
1370         * also disable coreint if the MPIC node doesn't have
1371         * an "fsl,mpic" compatible at all.  This will be the case
1372         * with device trees generated by older versions of QEMU.
1373         * fsl_version will be zero if MPIC_FSL is not set.
1374         */
1375        if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) {
1376                WARN_ON(ppc_md.get_irq != mpic_get_coreint_irq);
1377                ppc_md.get_irq = mpic_get_irq;
1378        }
1379
1380        /* Reset */
1381
1382        /* When using a device-node, reset requests are only honored if the MPIC
1383         * is allowed to reset.
1384         */
1385        if (!(mpic->flags & MPIC_NO_RESET)) {
1386                printk(KERN_DEBUG "mpic: Resetting\n");
1387                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1388                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1389                           | MPIC_GREG_GCONF_RESET);
1390                while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1391                       & MPIC_GREG_GCONF_RESET)
1392                        mb();
1393        }
1394
1395        /* CoreInt */
1396        if (mpic->flags & MPIC_ENABLE_COREINT)
1397                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1398                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1399                           | MPIC_GREG_GCONF_COREINT);
1400
1401        if (mpic->flags & MPIC_ENABLE_MCK)
1402                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1403                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1404                           | MPIC_GREG_GCONF_MCK);
1405
1406        /*
1407         * The MPIC driver will crash if there are more cores than we
1408         * can initialize, so we may as well catch that problem here.
1409         */
1410        BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
1411
1412        /* Map the per-CPU registers */
1413        for_each_possible_cpu(i) {
1414                unsigned int cpu = get_hard_smp_processor_id(i);
1415
1416                mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
1417                         MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
1418                         0x1000);
1419        }
1420
1421        /*
1422         * Read feature register.  For non-ISU MPICs, num sources as well. On
1423         * ISU MPICs, sources are counted as ISUs are added
1424         */
1425        greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
1426
1427        /*
1428         * By default, the last source number comes from the MPIC, but the
1429         * device-tree and board support code can override it on buggy hw.
1430         * If we get passed an isu_size (multi-isu MPIC) then we use that
1431         * as a default instead of the value read from the HW.
1432         */
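             /* Precedence, lowest to highest: feature register value,
              * isu_size argument, "last-interrupt-source" property,
              * irq_count argument.
              */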
1433        last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
1434                                >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;    
1435        if (isu_size)
1436                last_irq = isu_size  * MPIC_MAX_ISU - 1;
1437        of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
1438        if (irq_count)
1439                last_irq = irq_count - 1;
1440
1441        /* Initialize main ISU if none provided */
1442        if (!isu_size) {
1443                isu_size = last_irq + 1;
1444                mpic->num_sources = isu_size;
1445                mpic_map(mpic, mpic->paddr, &mpic->isus[0],
1446                                MPIC_INFO(IRQ_BASE),
1447                                MPIC_INFO(IRQ_STRIDE) * isu_size);
1448        }
1449
1450        mpic->isu_size = isu_size;
1451        mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
1452        mpic->isu_mask = (1 << mpic->isu_shift) - 1;
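             /* isu_shift rounds isu_size up to a power of two: e.g.
              * isu_size = 256 gives isu_shift = 8 and isu_mask = 0xff, so
              * source 260 is ISU 1, index 4 in _mpic_irq_read() and
              * _mpic_irq_write().
              */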
1453
1454        mpic->irqhost = irq_domain_add_linear(mpic->node,
1455                                       intvec_top,
1456                                       &mpic_host_ops, mpic);
1457
1458        /*
1459         * FIXME: The code leaks the MPIC object and mappings here; this
1460         * is very unlikely to fail but it ought to be fixed anyways.
1461         */
1462        if (mpic->irqhost == NULL)
1463                return NULL;
1464
1465        /* Display version */
1466        switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
1467        case 1:
1468                vers = "1.0";
1469                break;
1470        case 2:
1471                vers = "1.2";
1472                break;
1473        case 3:
1474                vers = "1.3";
1475                break;
1476        default:
1477                vers = "<unknown>";
1478                break;
1479        }
1480        printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
1481               " max %d CPUs\n",
1482               name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
1483        printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
1484               mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
1485
1486        mpic->next = mpics;
1487        mpics = mpic;
1488
1489        if (!(mpic->flags & MPIC_SECONDARY)) {
1490                mpic_primary = mpic;
1491                irq_set_default_host(mpic->irqhost);
1492        }
1493
1494        return mpic;
1495
1496err_of_node_put:
1497        of_node_put(node);
1498        return NULL;
1499}
1500
1501void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
1502                            phys_addr_t paddr)
1503{
1504        unsigned int isu_first = isu_num * mpic->isu_size;
1505
1506        BUG_ON(isu_num >= MPIC_MAX_ISU);
1507
1508        mpic_map(mpic,
1509                 paddr, &mpic->isus[isu_num], 0,
1510                 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
1511
1512        if ((isu_first + mpic->isu_size) > mpic->num_sources)
1513                mpic->num_sources = isu_first + mpic->isu_size;
1514}
1515
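/*
 * mpic_init - bring the controller into a known, usable state
 *
 * Re-vectors and masks the timers, the IPIs and (unless MPIC_NO_RESET
 * is set) every interrupt source, programs the spurious vector,
 * disables 8259 passthrough where supported and finally drops this
 * CPU's task priority to 0 so interrupts can be delivered.  A
 * secondary (cascaded) MPIC is also hooked up to its parent interrupt
 * here.
 */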
1516void __init mpic_init(struct mpic *mpic)
1517{
1518        int i, cpu;
1519        int num_timers = 4;
1520
1521        BUG_ON(mpic->num_sources == 0);
1522
1523        printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
1524
1525        /* Set current processor priority to max */
1526        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1527
1528        if (mpic->flags & MPIC_FSL) {
1529                u32 brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1530                                      MPIC_FSL_BRR1);
1531                u32 version = brr1 & MPIC_FSL_BRR1_VER;
1532
1533                /*
1534                 * Timer group B is present at the latest in MPIC 3.1 (e.g.
1535                 * mpc8536).  It is not present in MPIC 2.0 (e.g. mpc8544).
1536                 * I don't know about the status of intermediate versions (or
1537                 * whether they even exist).
1538                 */
1539                if (version >= 0x0301)
1540                        num_timers = 8;
1541        }
1542
1543        /* FSL MPIC error interrupt initialization */
1544        if (mpic->flags & MPIC_FSL_HAS_EIMR)
1545                mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
1546
1547        /* Initialize timers to our reserved vectors and mask them for now */
1548        for (i = 0; i < num_timers; i++) {
1549                unsigned int offset = mpic_tm_offset(mpic, i);
1550
1551                mpic_write(mpic->tmregs,
1552                           offset + MPIC_INFO(TIMER_DESTINATION),
1553                           1 << hard_smp_processor_id());
1554                mpic_write(mpic->tmregs,
1555                           offset + MPIC_INFO(TIMER_VECTOR_PRI),
1556                           MPIC_VECPRI_MASK |
1557                           (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
1558                           (mpic->timer_vecs[0] + i));
1559        }
1560
1561        /* Initialize IPIs to our reserved vectors and mark them disabled for now */
1562        mpic_test_broken_ipi(mpic);
1563        for (i = 0; i < 4; i++) {
1564                mpic_ipi_write(i,
1565                               MPIC_VECPRI_MASK |
1566                               (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
1567                               (mpic->ipi_vecs[0] + i));
1568        }
1569
1570        /* Do the HT PIC fixups on the broken U3 MPIC */
1571        DBG("MPIC flags: %x\n", mpic->flags);
1572        if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
1573                mpic_scan_ht_pics(mpic);
1574                mpic_u3msi_init(mpic);
1575        }
1576
1577        mpic_pasemi_msi_init(mpic);
1578
1579        cpu = mpic_processor_id(mpic);
1580
1581        if (!(mpic->flags & MPIC_NO_RESET)) {
1582                for (i = 0; i < mpic->num_sources; i++) {
1583                        /* start with vector = source number, and masked */
1584                        u32 vecpri = MPIC_VECPRI_MASK | i |
1585                                (8 << MPIC_VECPRI_PRIORITY_SHIFT);
1586
1587                        /* check if protected */
1588                        if (mpic->protected && test_bit(i, mpic->protected))
1589                                continue;
1590                        /* init hw */
1591                        mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
1592                        mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
1593                }
1594        }
1595
1596        /* Init spurious vector */
1597        mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);
1598
1599        /* Disable 8259 passthrough, if supported */
1600        if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
1601                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1602                           mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1603                           | MPIC_GREG_GCONF_8259_PTHROU_DIS);
1604
1605        if (mpic->flags & MPIC_NO_BIAS)
1606                mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1607                        mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1608                        | MPIC_GREG_GCONF_NO_BIAS);
1609
1610        /* Set current processor priority to 0 */
1611        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
1612
1613#ifdef CONFIG_PM
1614        /* allocate memory to save mpic state */
1615        mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
1616                                  GFP_KERNEL);
1617        BUG_ON(mpic->save_data == NULL);
1618#endif
1619
1620        /* Check if this MPIC is chained from a parent interrupt controller */
1621        if (mpic->flags & MPIC_SECONDARY) {
1622                int virq = irq_of_parse_and_map(mpic->node, 0);
1623                if (virq != NO_IRQ) {
1624                        printk(KERN_INFO "%s: hooking up to IRQ %d\n",
1625                                        mpic->node->full_name, virq);
1626                        irq_set_handler_data(virq, mpic);
1627                        irq_set_chained_handler(virq, &mpic_cascade);
1628                }
1629        }
1630}
1631
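/*
 * mpic_set_clk_ratio - program the clock ratio field of GLOBAL_CONF_1.
 * Only controllers that actually implement this register should call
 * it.
 */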
1632void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
1633{
1634        u32 v;
1635
1636        v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
1637        v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
1638        v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
1639        mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1640}
1641
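/*
 * mpic_set_serial_int - enable or disable serial interrupt mode by
 * toggling the SIE bit in GLOBAL_CONF_1, under the global mpic_lock.
 */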
1642void __init mpic_set_serial_int(struct mpic *mpic, int enable)
1643{
1644        unsigned long flags;
1645        u32 v;
1646
1647        raw_spin_lock_irqsave(&mpic_lock, flags);
1648        v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
1649        if (enable)
1650                v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
1651        else
1652                v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
1653        mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1654        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1655}
1656
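/*
 * mpic_irq_set_priority - set the hardware priority of a virtual irq
 *
 * Handles normal sources as well as the internal IPI and timer
 * vectors; only the priority bits of the matching VECPRI register are
 * rewritten, under mpic_lock.
 */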
1657void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1658{
1659        struct mpic *mpic = mpic_find(irq);
1660        unsigned int src = virq_to_hw(irq);
1661        unsigned long flags;
1662        u32 reg;
1663
1664        if (!mpic)
1665                return;
1666
1667        raw_spin_lock_irqsave(&mpic_lock, flags);
1668        if (mpic_is_ipi(mpic, src)) {
1669                reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
1670                        ~MPIC_VECPRI_PRIORITY_MASK;
1671                mpic_ipi_write(src - mpic->ipi_vecs[0],
1672                               reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1673        } else if (mpic_is_tm(mpic, src)) {
1674                reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
1675                        ~MPIC_VECPRI_PRIORITY_MASK;
1676                mpic_tm_write(src - mpic->timer_vecs[0],
1677                              reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1678        } else {
1679                reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
1680                        & ~MPIC_VECPRI_PRIORITY_MASK;
1681                mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
1682                               reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1683        }
1684        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1685}
1686
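/*
 * mpic_setup_this_cpu - per-CPU bring-up for the primary MPIC
 *
 * If interrupts are distributed and the controller can target more
 * than one CPU, the calling CPU is added to every source's destination
 * mask; its task priority is then dropped to 0.  Typically called from
 * the platform's SMP setup path (see smp_mpic_setup_cpu()).
 */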
1687void mpic_setup_this_cpu(void)
1688{
1689#ifdef CONFIG_SMP
1690        struct mpic *mpic = mpic_primary;
1691        unsigned long flags;
1692        u32 msk = 1 << hard_smp_processor_id();
1693        unsigned int i;
1694
1695        BUG_ON(mpic == NULL);
1696
1697        DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1698
1699        raw_spin_lock_irqsave(&mpic_lock, flags);
1700
1701        /* Let the MPIC know we want interrupts.  The default affinity is
1702         * 0xffffffff until changed via /proc; that's how it's done on x86.
1703         * If we want it to behave differently, we should also change the
1704         * default values of irq_desc[].affinity in irq.c.
1705         */
1706        if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
1707                for (i = 0; i < mpic->num_sources ; i++)
1708                        mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1709                                mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
1710        }
1711
1712        /* Set current processor priority to 0 */
1713        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
1714
1715        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1716#endif /* CONFIG_SMP */
1717}
1718
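/* Return the calling CPU's current task priority register value. */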
1719int mpic_cpu_get_priority(void)
1720{
1721        struct mpic *mpic = mpic_primary;
1722
1723        return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
1724}
1725
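/* Set the calling CPU's task priority, masked to the valid range. */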
1726void mpic_cpu_set_priority(int prio)
1727{
1728        struct mpic *mpic = mpic_primary;
1729
1730        prio &= MPIC_CPU_TASKPRI_MASK;
1731        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
1732}
1733
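/*
 * mpic_teardown_this_cpu - inverse of mpic_setup_this_cpu
 *
 * Removes the calling CPU from every source's destination mask, raises
 * its task priority back to the maximum and issues an EOI (see the
 * comment below about platforms that don't reset the MPIC on boot).
 */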
1734void mpic_teardown_this_cpu(int secondary)
1735{
1736        struct mpic *mpic = mpic_primary;
1737        unsigned long flags;
1738        u32 msk = 1 << hard_smp_processor_id();
1739        unsigned int i;
1740
1741        BUG_ON(mpic == NULL);
1742
1743        DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
1744        raw_spin_lock_irqsave(&mpic_lock, flags);
1745
1746        /* Let the MPIC know we don't want interrupts. */
1747        for (i = 0; i < mpic->num_sources ; i++)
1748                mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1749                        mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
1750
1751        /* Set current processor priority to max */
1752        mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1753        /* We need to EOI the IPI since not all platforms reset the MPIC
1754         * on boot and new interrupts wouldn't get delivered otherwise.
1755         */
1756        mpic_eoi(mpic);
1757
1758        raw_spin_unlock_irqrestore(&mpic_lock, flags);
1759}
1760
1761
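/*
 * _mpic_get_one_irq - read an interrupt acknowledge register and turn
 * the returned hardware vector into a Linux virq.  Spurious and
 * "protected" vectors are filtered out (EOIed where appropriate) and
 * reported as NO_IRQ.
 */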
1762static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
1763{
1764        u32 src;
1765
1766        src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
1767#ifdef DEBUG_LOW
1768        DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
1769#endif
1770        if (unlikely(src == mpic->spurious_vec)) {
1771                if (mpic->flags & MPIC_SPV_EOI)
1772                        mpic_eoi(mpic);
1773                return NO_IRQ;
1774        }
1775        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1776                printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1777                                   mpic->name, (int)src);
1778                mpic_eoi(mpic);
1779                return NO_IRQ;
1780        }
1781
1782        return irq_linear_revmap(mpic->irqhost, src);
1783}
1784
1785unsigned int mpic_get_one_irq(struct mpic *mpic)
1786{
1787        return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
1788}
1789
1790unsigned int mpic_get_irq(void)
1791{
1792        struct mpic *mpic = mpic_primary;
1793
1794        BUG_ON(mpic == NULL);
1795
1796        return mpic_get_one_irq(mpic);
1797}
1798
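/*
 * mpic_get_coreint_irq - variant of mpic_get_irq() for cores running
 * in coreint (external proxy) mode: the vector is taken from SPRN_EPR
 * instead of the MPIC's interrupt acknowledge register.  BookE only.
 */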
1799unsigned int mpic_get_coreint_irq(void)
1800{
1801#ifdef CONFIG_BOOKE
1802        struct mpic *mpic = mpic_primary;
1803        u32 src;
1804
1805        BUG_ON(mpic == NULL);
1806
1807        src = mfspr(SPRN_EPR);
1808
1809        if (unlikely(src == mpic->spurious_vec)) {
1810                if (mpic->flags & MPIC_SPV_EOI)
1811                        mpic_eoi(mpic);
1812                return NO_IRQ;
1813        }
1814        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1815                printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1816                                   mpic->name, (int)src);
1817                return NO_IRQ;
1818        }
1819
1820        return irq_linear_revmap(mpic->irqhost, src);
1821#else
1822        return NO_IRQ;
1823#endif
1824}
1825
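/*
 * mpic_get_mcirq - acknowledge and return the pending machine-check
 * interrupt source via the primary MPIC's MCACK register.
 */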
1826unsigned int mpic_get_mcirq(void)
1827{
1828        struct mpic *mpic = mpic_primary;
1829
1830        BUG_ON(mpic == NULL);
1831
1832        return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
1833}
1834
1835#ifdef CONFIG_SMP
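/*
 * mpic_request_ipis - map the four reserved IPI vectors to virqs and
 * register them with the generic SMP message-passing code.
 */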
1836void mpic_request_ipis(void)
1837{
1838        struct mpic *mpic = mpic_primary;
1839        int i;
1840        BUG_ON(mpic == NULL);
1841
1842        printk(KERN_INFO "mpic: requesting IPIs...\n");
1843
1844        for (i = 0; i < 4; i++) {
1845                unsigned int vipi = irq_create_mapping(mpic->irqhost,
1846                                                       mpic->ipi_vecs[0] + i);
1847                if (vipi == NO_IRQ) {
1848                        printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
1849                        continue;
1850                }
1851                smp_request_message_ipi(vipi, i);
1852        }
1853}
1854
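/*
 * smp_mpic_message_pass - send IPI number 'msg' (0-3) to 'cpu' by
 * writing the target's physical CPU mask into the corresponding IPI
 * dispatch register.
 */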
1855void smp_mpic_message_pass(int cpu, int msg)
1856{
1857        struct mpic *mpic = mpic_primary;
1858        u32 physmask;
1859
1860        BUG_ON(mpic == NULL);
1861
1862        /* make sure we're sending something that translates to an IPI */
1863        if ((unsigned int)msg > 3) {
1864                printk(KERN_WARNING "SMP %d: smp_message_pass: unknown msg %d\n",
1865                       smp_processor_id(), msg);
1866                return;
1867        }
1868
1869#ifdef DEBUG_IPI
1870        DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
1871#endif
1872
1873        physmask = 1 << get_hard_smp_processor_id(cpu);
1874
1875        mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1876                       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
1877}
1878
1879int __init smp_mpic_probe(void)
1880{
1881        int nr_cpus;
1882
1883        DBG("smp_mpic_probe()...\n");
1884
1885        nr_cpus = cpumask_weight(cpu_possible_mask);
1886
1887        DBG("nr_cpus: %d\n", nr_cpus);
1888
1889        if (nr_cpus > 1)
1890                mpic_request_ipis();
1891
1892        return nr_cpus;
1893}
1894
1895void smp_mpic_setup_cpu(int cpu)
1896{
1897        mpic_setup_this_cpu();
1898}
1899
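/*
 * mpic_reset_core - pulse a core's bit in the Processor Init register
 * to reset it, reading the register back after each write (presumably
 * to push the write out to the controller).  FSL CoreNet parts also
 * need a series of EOIs afterwards to clear interrupts left pending
 * across the reset.
 */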
1900void mpic_reset_core(int cpu)
1901{
1902        struct mpic *mpic = mpic_primary;
1903        u32 pir;
1904        int cpuid = get_hard_smp_processor_id(cpu);
1905        int i;
1906
1907        /* Set target bit for core reset */
1908        pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
1909        pir |= (1 << cpuid);
1910        mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
1911        mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
1912
1913        /* Restore target bit after reset complete */
1914        pir &= ~(1 << cpuid);
1915        mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
1916        mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
1917
1918        /* Perform 15 EOIs on each reset core to clear pending interrupts.
1919         * This is required for FSL CoreNet-based devices. */
1920        if (mpic->flags & MPIC_FSL) {
1921                for (i = 0; i < 15; i++) {
1922                        _mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid],
1923                                      MPIC_CPU_EOI, 0);
1924                }
1925        }
1926}
1927#endif /* CONFIG_SMP */
1928
1929#ifdef CONFIG_PM
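/*
 * Suspend/resume support: each source's VECPRI and DESTINATION
 * registers are saved into mpic->save_data at syscore suspend time and
 * written back (along with any U3/HT fixup state) on resume.
 */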
1930static void mpic_suspend_one(struct mpic *mpic)
1931{
1932        int i;
1933
1934        for (i = 0; i < mpic->num_sources; i++) {
1935                mpic->save_data[i].vecprio =
1936                        mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
1937                mpic->save_data[i].dest =
1938                        mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
1939        }
1940}
1941
1942static int mpic_suspend(void)
1943{
1944        struct mpic *mpic = mpics;
1945
1946        while (mpic) {
1947                mpic_suspend_one(mpic);
1948                mpic = mpic->next;
1949        }
1950
1951        return 0;
1952}
1953
1954static void mpic_resume_one(struct mpic *mpic)
1955{
1956        int i;
1957
1958        for (i = 0; i < mpic->num_sources; i++) {
1959                mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
1960                               mpic->save_data[i].vecprio);
1961                mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1962                               mpic->save_data[i].dest);
1963
1964#ifdef CONFIG_MPIC_U3_HT_IRQS
1965                if (mpic->fixups) {
1966                        struct mpic_irq_fixup *fixup = &mpic->fixups[i];
1967
1968                        if (fixup->base) {
1969                                /* we use the lowest bit in an inverted meaning */
1970                                if ((mpic->save_data[i].fixup_data & 1) == 0)
1971                                        continue;
1972
1973                                /* Enable and configure */
1974                                writeb(0x10 + 2 * fixup->index, fixup->base + 2);
1975
1976                                writel(mpic->save_data[i].fixup_data & ~1,
1977                                       fixup->base + 4);
1978                        }
1979                }
1980#endif
1981        } /* end for loop */
1982}
1983
1984static void mpic_resume(void)
1985{
1986        struct mpic *mpic = mpics;
1987
1988        while (mpic) {
1989                mpic_resume_one(mpic);
1990                mpic = mpic->next;
1991        }
1992}
1993
1994static struct syscore_ops mpic_syscore_ops = {
1995        .resume = mpic_resume,
1996        .suspend = mpic_suspend,
1997};
1998
1999static int mpic_init_sys(void)
2000{
2001        register_syscore_ops(&mpic_syscore_ops);
2002        return 0;
2003}
2004
2005device_initcall(mpic_init_sys);
2006#endif
2007