linux/arch/powerpc/sysdev/xics/icp-native.c
/*
 * Copyright 2011 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

struct icp_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;
        union {
                u32 word;
                u8 bytes[4];
        } xirr;
        u32 dummy;
        union {
                u32 word;
                u8 bytes[4];
        } qirr;
        u32 link_a;
        u32 link_b;
        u32 link_c;
};
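
/*
 * Note (editorial): this struct mirrors the per-CPU ICP MMIO layout as
 * the driver uses it: the poll XIRR at offset 0, the real XIRR at
 * offset 4, and the QIRR (the XICS MFRR) at offset 12. The unions let
 * the code make byte-sized accesses to a single field of a register,
 * e.g. just the CPPR byte of the XIRR, without touching the rest.
 */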

static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];

static inline unsigned int icp_native_get_xirr(void)
{
        int cpu = smp_processor_id();
        unsigned int xirr;

        /* Return an interrupt latched by KVM first, if there is one */
        xirr = kvmppc_get_xics_latch();
        if (xirr)
                return xirr;

        return in_be32(&icp_native_regs[cpu]->xirr.word);
}
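
/*
 * Illustration (editorial, not in the original source): the 32-bit
 * XIRR packs the CPPR into its top byte and the interrupt source
 * (XISR) into the low 24 bits, which is why callers below mask with
 * 0x00ffffff and shift priorities left by 24. A read of, say,
 * 0x05000123 would mean "previous priority 0x05, source 0x123";
 * roughly speaking, the read itself raises the CPPR to the accepted
 * interrupt's priority until the matching EOI write.
 */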

static inline void icp_native_set_xirr(unsigned int value)
{
        int cpu = smp_processor_id();

        out_be32(&icp_native_regs[cpu]->xirr.word, value);
}

static inline void icp_native_set_cppr(u8 value)
{
        int cpu = smp_processor_id();

        out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
}

static inline void icp_native_set_qirr(int n_cpu, u8 value)
{
        out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
}
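
/*
 * Note (editorial): the QIRR byte is the XICS MFRR. Writing a priority
 * more favored (numerically lower) than the target CPU's CPPR raises
 * an IPI there; writing 0xff, the least favored value, retires any
 * pending IPI, which is what the teardown and flush paths below rely
 * on. Likewise, icp_native_set_cppr() works because bytes[0] is the
 * most significant byte of the big-endian XIRR word, i.e. the CPPR.
 */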

static void icp_native_set_cpu_priority(unsigned char cppr)
{
        xics_set_base_cppr(cppr);
        icp_native_set_cppr(cppr);
        iosync();
}

void icp_native_eoi(struct irq_data *d)
{
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

        iosync();
        icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
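
/*
 * Example (editorial): for hw_irq 0x1005 and a popped CPPR of 0xff,
 * the store above is 0xff001005 -- a single XIRR write that both
 * restores the pre-interrupt priority (top byte) and EOIs the source
 * (low 24 bits).
 */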

static void icp_native_teardown_cpu(void)
{
        int cpu = smp_processor_id();

        /* Clear any pending IPI */
        icp_native_set_qirr(cpu, 0xff);
}

static void icp_native_flush_ipi(void)
{
        /* We took the IPI irq and will never return, so we need to
         * EOI the IPI, but we want to leave our priority at 0.
         *
         * Should we check all the other interrupts too?
         * Should we be flagging the idle loop instead?
         * Or creating some task to be scheduled?
         */

        icp_native_set_xirr((0x00 << 24) | XICS_IPI);
}
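
/*
 * Note (editorial): XICS priorities are "lower is more favored", so a
 * CPPR of 0x00 masks everything and 0xff accepts everything. The
 * (0x00 << 24) | XICS_IPI write above therefore EOIs the IPI while
 * leaving the CPU masked, as the comment asks for.
 */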

static unsigned int icp_native_get_irq(void)
{
        unsigned int xirr = icp_native_get_xirr();
        unsigned int vec = xirr & 0x00ffffff;
        unsigned int irq;

        if (vec == XICS_IRQ_SPURIOUS)
                return 0;

        irq = irq_find_mapping(xics_host, vec);
        if (likely(irq)) {
                xics_push_cppr(vec);
                return irq;
        }

        /* We don't have a Linux mapping, so have RTAS mask it. */
        xics_mask_unknown_vec(vec);

        /* We might learn about it later, so EOI it */
        icp_native_set_xirr(xirr);

        return 0;
}

#ifdef CONFIG_SMP

static void icp_native_cause_ipi(int cpu)
{
        kvmppc_set_host_ipi(cpu, 1);
        icp_native_set_qirr(cpu, IPI_PRIORITY);
}
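
/*
 * Note (editorial): the kvmppc_set_host_ipi() flag lets KVM's
 * real-mode interrupt code tell a host IPI apart from one meant for a
 * guest; it is the MFRR write of IPI_PRIORITY that actually triggers
 * the interrupt on the target CPU.
 */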

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void icp_native_cause_ipi_rm(int cpu)
{
        /*
         * Currently not used to send IPIs to another CPU
         * on the same core. Only caller is KVM real mode.
         * Need the physical address of the XICS to be
         * previously saved in kvm_hstate in the paca.
         */
        void __iomem *xics_phys;

        /*
         * Just like the cause_ipi functions, it is required to
         * include a full barrier before causing the IPI.
         */
        xics_phys = paca[cpu].kvm_hstate.xics_phys;
        mb();
        __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
}
#endif

/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_native_flush_interrupt(void)
{
        unsigned int xirr = icp_native_get_xirr();
        unsigned int vec = xirr & 0x00ffffff;

        if (vec == XICS_IRQ_SPURIOUS)
                return;
        if (vec == XICS_IPI) {
                /* Clear pending IPI */
                int cpu = smp_processor_id();
                kvmppc_set_host_ipi(cpu, 0);
                icp_native_set_qirr(cpu, 0xff);
        } else {
                pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
                       vec);
                xics_mask_unknown_vec(vec);
        }
        /* EOI the interrupt */
        icp_native_set_xirr(xirr);
}

void xics_wake_cpu(int cpu)
{
        icp_native_set_qirr(cpu, IPI_PRIORITY);
}
EXPORT_SYMBOL_GPL(xics_wake_cpu);

static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
        int cpu = smp_processor_id();

        kvmppc_set_host_ipi(cpu, 0);
        icp_native_set_qirr(cpu, 0xff);

        return smp_ipi_demux();
}

#endif /* CONFIG_SMP */

static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
                                         unsigned long size)
{
        char *rname;
        int i, cpu = -1;

        /* This may look gross but it's good enough for now; we don't
         * quite have a hard -> linux processor id mapping.
         */
        for_each_possible_cpu(i) {
                if (!cpu_present(i))
                        continue;
                if (hw_id == get_hard_smp_processor_id(i)) {
                        cpu = i;
                        break;
                }
        }

        /* No match, skip that CPU. Don't print; it's normal, as some
         * XICS come up with far more entries than you have CPUs.
         */
        if (cpu == -1)
                return 0;

        rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
                          cpu, hw_id);

        if (!request_mem_region(addr, size, rname)) {
                pr_warning("icp_native: Could not reserve ICP MMIO"
                           " for CPU %d, interrupt server #0x%x\n",
                           cpu, hw_id);
                return -EBUSY;
        }

        icp_native_regs[cpu] = ioremap(addr, size);
        kvmppc_set_xics_phys(cpu, addr);
        if (!icp_native_regs[cpu]) {
                pr_warning("icp_native: Failed ioremap for CPU %d, "
                           "interrupt server #0x%x, addr %#lx\n",
                           cpu, hw_id, addr);
                release_mem_region(addr, size);
                return -ENOMEM;
        }
        return 0;
}
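
/*
 * Note (editorial): kvmppc_set_xics_phys() above stashes the physical
 * ICP address in the CPU's paca (kvm_hstate.xics_phys); that is the
 * value icp_native_cause_ipi_rm() reads back so KVM real-mode code can
 * write the MFRR without going through the ioremap'ed mapping.
 */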

static int __init icp_native_init_one_node(struct device_node *np,
                                           unsigned int *indx)
{
        unsigned int ilen;
        const __be32 *ireg;
        int i;
        int reg_tuple_size;
        int num_servers = 0;

        /* This code makes the theoretically broken assumption that the
         * interrupt server numbers are the same as the hard CPU numbers.
         * This happens to be the case so far, but we are playing with
         * fire... should be fixed one of these days. -BenH.
         */
        ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);

        /* Does that ever happen? We'll know soon enough... but even
         * good old f80 does have that property.
         */
        WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));

        if (ireg) {
                *indx = of_read_number(ireg, 1);
                if (ilen >= 2*sizeof(u32))
                        num_servers = of_read_number(ireg + 1, 1);
        }

        ireg = of_get_property(np, "reg", &ilen);
        if (!ireg) {
                pr_err("icp_native: Can't find interrupt reg property");
                return -1;
        }

        reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
        if (((ilen % reg_tuple_size) != 0)
            || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
                pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
                       ilen / reg_tuple_size, num_servers);
                return -1;
        }

        for (i = 0; i < (ilen / reg_tuple_size); i++) {
                struct resource r;
                int err;

                err = of_address_to_resource(np, i, &r);
                if (err) {
                        pr_err("icp_native: Could not translate ICP MMIO"
                               " for interrupt server 0x%x (%d)\n", *indx, err);
                        return -1;
                }

                if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
                        return -1;

                (*indx)++;
        }
        return 0;
}
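
/*
 * Illustrative device tree fragment (editorial sketch; all values are
 * made up): this is the shape of node the function above parses.
 * "ibm,interrupt-server-ranges" supplies the first server number and
 * the server count, and each "reg" tuple maps one per-CPU ICP page:
 *
 *      interrupt-controller {
 *              compatible = "ibm,ppc-xicp";
 *              ibm,interrupt-server-ranges = <0 4>;
 *              reg = <0xfe000000 0x1000>, <0xfe001000 0x1000>,
 *                    <0xfe002000 0x1000>, <0xfe003000 0x1000>;
 *      };
 */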

static const struct icp_ops icp_native_ops = {
        .get_irq        = icp_native_get_irq,
        .eoi            = icp_native_eoi,
        .set_priority   = icp_native_set_cpu_priority,
        .teardown_cpu   = icp_native_teardown_cpu,
        .flush_ipi      = icp_native_flush_ipi,
#ifdef CONFIG_SMP
        .ipi_action     = icp_native_ipi_action,
        .cause_ipi      = icp_native_cause_ipi,
#endif
};

int __init icp_native_init(void)
{
        struct device_node *np;
        u32 indx = 0;
        int found = 0;

        for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
                if (icp_native_init_one_node(np, &indx) == 0)
                        found = 1;
        if (!found) {
                for_each_node_by_type(np,
                        "PowerPC-External-Interrupt-Presentation") {
                        if (icp_native_init_one_node(np, &indx) == 0)
                                found = 1;
                }
        }

        if (found == 0)
                return -ENODEV;

        icp_ops = &icp_native_ops;

        return 0;
}