linux/arch/arc/kernel/mcip.c
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>

static char smp_cpuinfo_buf[128];
static int idu_detected;

static DEFINE_RAW_SPINLOCK(mcip_lock);

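/*
 * All ARConnect commands funnel through a single CMD/READBACK aux register
 * pair (see asm/mcip.h), so every command + readback sequence must hold
 * mcip_lock. A minimal sketch of the protocol as used throughout this file
 * (hypothetical caller):
 *
 *	raw_spin_lock_irqsave(&mcip_lock, flags);
 *	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);	/- issue command -/
 *	status = read_aux_reg(ARC_REG_MCIP_READBACK);	/- collect result -/
 *	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 */
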
static void mcip_setup_per_cpu(int cpu)
{
        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}

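/*
 * Note: per the self-IPI special case in mcip_ipi_send() below, ARConnect
 * can only signal *other* cores; IPIs to self are bounced through the
 * core-local software interrupt (SOFTIRQ_IRQ) hooked up above.
 */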
static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /* ARConnect can only send IPI to others */
        if (unlikely(cpu == raw_smp_processor_id())) {
                arc_softirq_trigger(SOFTIRQ_IRQ);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * If the receiver already has a pending interrupt, elide sending
         * this one: Linux cross-core calling copes fine with concurrent
         * IPIs coalesced into a single hardware interrupt.
         * See arch/arc/kernel/smp.c: ipi_send_msg_one()
         */
        __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
        ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
        if (!ipi_was_pending)
                __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;

        if (unlikely(irq == SOFTIRQ_IRQ)) {
                arc_softirq_clear(irq);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

        cpu = read_aux_reg(ARC_REG_MCIP_READBACK);      /* 1,2,4,8... */

        /*
         * In the rare case that multiple concurrent IPIs sent to the same
         * target got coalesced by MCIP into one asserted IRQ, @cpu can be
         * "vectored" (multiple bits set) as opposed to the typical single bit
         */
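        /*
         * e.g. (hypothetical) READBACK = 0x6: cpus 1 and 2 both signalled
         * us; the loop below ACKs source 1 first (__ffs), then source 2
         */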
        do {
                c = __ffs(cpu);                 /* 0,1,2,3 */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
        struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int pad3:8,
                             idu:1, llm:1, num_cores:6,
                             iocoh:1, gfrc:1, dbg:1, pad2:1,
                             msg:1, sem:1, ipi:1, pad:1,
                             ver:8;
#else
                unsigned int ver:8,
                             pad:1, ipi:1, sem:1, msg:1,
                             pad2:1, dbg:1, gfrc:1, iocoh:1,
                             num_cores:6, llm:1, idu:1,
                             pad3:8;
#endif
        } mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        sprintf(smp_cpuinfo_buf,
                "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.llm, "LLM "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));

        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
        idu_detected = mp.idu;

        if (mp.dbg) {
                __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
                __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
        }
}

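/*
 * For reference, a cpuinfo line rendered from the sprintf() above might
 * read (feature set hypothetical):
 *
 *	Extn [SMP]	: ARConnect (v2): 4 cores with IPI IDU DEBUG GFRC
 */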
struct plat_smp_ops plat_smp_ops = {
        .info           = smp_cpuinfo_buf,
        .init_early_smp = mcip_probe_n_setup,
        .init_per_cpu   = mcip_setup_per_cpu,
        .ipi_send       = mcip_ipi_send,
        .ipi_clear      = mcip_ipi_clear,
};

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (Round Robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
        __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

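/*
 * Program the SET_MODE payload for @cmn_irq: per the union layout below,
 * @distr lands in bits [1:0] and @lvl in bit 4
 */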
static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
                         unsigned int distr)
{
        union {
                unsigned int word;
                struct {
                        unsigned int distr:2, pad:2, lvl:1, pad2:27;
                };
        } data;

        data.distr = distr;
        data.lvl = lvl;
        __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_unmask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
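/*
 * Affinity change: re-program DEST from the requested mask; only the
 * first word of the online-filtered cpumask is handed to hardware
 * (idu_set_dest() takes a 32-bit, 1-bit-per-core mask)
 */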
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
        unsigned long flags;
        cpumask_t online;

        /* error out if @cpumask has no online CPUs */
        if (!cpumask_and(&online, cpumask, cpu_online_mask))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
        idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
#ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
#endif
};

static int idu_first_irq;

static void idu_cascade_isr(struct irq_desc *desc)
{
        struct irq_domain *domain = irq_desc_get_handler_data(desc);
        unsigned int core_irq = irq_desc_get_irq(desc);
        unsigned int idu_irq;

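        /*
         * e.g. (hypothetical) with common IRQs cascading into core intc
         * at 24, 25, ... and idu_first_irq = 24, core irq 26 demuxes to
         * IDU hwirq 2
         */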
        idu_irq = core_irq - idu_first_irq;
        generic_handle_irq(irq_find_mapping(domain, idu_irq));
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq,
                       irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

        return 0;
}

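/*
 * Translate a 2-cell DT interrupt specifier: cell 0 is the common irq,
 * cell 1 the distribution scheme. A hypothetical consumer node:
 *
 *	ethernet@f0000000 {
 *		interrupt-parent = <&idu_intc>;
 *		interrupts = <2 0>;	(common irq 2, Round Robin to all)
 *	};
 */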
static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
                         const u32 *intspec, unsigned int intsize,
                         irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
        irq_hw_number_t hwirq = *out_hwirq = intspec[0];
        int distri = intspec[1];
        unsigned long flags;

        *out_type = IRQ_TYPE_NONE;

        /* XXX: validate distribution scheme against online cpu mask */
        if (distri == 0) {
                /* 0 - Round Robin to all cpus, otherwise 1 bit per core */
                raw_spin_lock_irqsave(&mcip_lock, flags);
                idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
                idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
                raw_spin_unlock_irqrestore(&mcip_lock, flags);
        } else {
                /*
                 * DEST based distribution for Level Triggered intr can only
                 * have 1 CPU, so degenerate to the first cpu in @distri.
                 * Note: ffs() is 1-based, while idu_set_dest() wants a
                 * 1-bit-per-core mask, hence the BIT(cpu - 1) below.
                 */
                int cpu = ffs(distri);

                if (cpu != fls(distri))
                        pr_warn("IDU irq %lx distri mode set to cpu %x\n",
                                hwirq, cpu - 1);

                raw_spin_lock_irqsave(&mcip_lock, flags);
                idu_set_dest(hwirq, BIT(cpu - 1));
                idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
                raw_spin_unlock_irqrestore(&mcip_lock, flags);
        }

        return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
        .xlate  = idu_irq_xlate,
        .map    = idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */

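/*
 * A hypothetical Devicetree node matched by IRQCHIP_DECLARE() at the
 * bottom of this file, with 4 common IRQs uplinked to core intc 24..27:
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&core_intc>;
 *		interrupts = <24 25 26 27>;
 *	};
 */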
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        /* Read IDU BCR to confirm nr_irqs */
        int nr_irqs = of_irq_count(intc);
        int i, irq;

        if (!idu_detected)
                panic("IDU not detected, but DeviceTree using it");

        pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */

        for (i = 0; i < nr_irqs; i++) {
                /*
                 * Map parent uplink IRQs (towards core intc) 24, 25, ...
                 * The core intc has already mapped these, but we redo it
                 * here to get hold of the parent virq, so the IDU cascade
                 * handler can be installed as the first level ISR.
                 */
                irq = irq_of_parse_and_map(intc, i);
                if (!i)
                        idu_first_irq = irq;

                irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);