linux/arch/mips/pmcs-msp71xx/msp_irq_cic.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file defines the irq handler for MSP CIC subsystem interrupts.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>

#include <msp_cic_int.h>
#include <msp_regs.h>

/*
 * External API
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);

/*
 * Convenience macro.  Should be somewhere generic.
 */
#define get_current_vpe()   \
        ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

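/*
 * The macros below serialize access to the per-VPE CIC mask registers:
 * dmt()/emt() pause and resume the other TCs on the current VPE, while
 * dvpe()/evpe() do the same for every VPE on the core, so the
 * read-modify-write sequences on the shared registers cannot be
 * interleaved with other hardware threads.
 */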
#ifdef CONFIG_SMP

#define LOCK_VPE(flags, mtflags)        \
do {                                    \
        local_irq_save(flags);          \
        mtflags = dmt();                \
} while (0)

#define UNLOCK_VPE(flags, mtflags)      \
do {                                    \
        emt(mtflags);                   \
        local_irq_restore(flags);       \
} while (0)

#define LOCK_CORE(flags, mtflags)       \
do {                                    \
        local_irq_save(flags);          \
        mtflags = dvpe();               \
} while (0)

#define UNLOCK_CORE(flags, mtflags)     \
do {                                    \
        evpe(mtflags);                  \
        local_irq_restore(flags);       \
} while (0)

#else

#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif

/* ensure writes to the CIC are completed */
static inline void cic_wmb(void)
{
        const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
        volatile u32 dummy_read;

        wmb();
        /* read back from the CIC to flush the posted write */
        dummy_read = __raw_readl(cic_mem);
        /* consume the value so the compiler does not warn about it being unused */
        dummy_read++;
}

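/* Enable a CIC interrupt source in the mask register of the current VPE. */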
static void unmask_cic_irq(struct irq_data *d)
{
        volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
        int vpe;
#ifdef CONFIG_SMP
        unsigned int mtflags;
        unsigned long flags;

        /*
         * Make sure we have IRQ affinity.  It may have changed while
         * we were processing the IRQ.
         */
        if (!cpumask_test_cpu(smp_processor_id(),
                              irq_data_get_affinity_mask(d)))
                return;
#endif

        vpe = get_current_vpe();
        LOCK_VPE(flags, mtflags);
        cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
        UNLOCK_VPE(flags, mtflags);
        cic_wmb();
}

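/* Disable a CIC interrupt source in the mask register of the current VPE. */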
static void mask_cic_irq(struct irq_data *d)
{
        volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
        int vpe = get_current_vpe();
#ifdef CONFIG_SMP
        unsigned long flags, mtflags;
#endif
        LOCK_VPE(flags, mtflags);
        cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
        UNLOCK_VPE(flags, mtflags);
        cic_wmb();
}

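/* Mask the interrupt, then clear its latched status bit in the CIC. */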
static void msp_cic_irq_ack(struct irq_data *d)
{
        mask_cic_irq(d);
        /*
         * Only really necessary for 18, 16-14 and sometimes 3:0
         * (since these can be edge sensitive) but it doesn't
         * hurt for the others
         */
        *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
}

/* Note: Limiting to VSMP. */

#ifdef CONFIG_MIPS_MT_SMP
static int msp_cic_irq_set_affinity(struct irq_data *d,
                                    const struct cpumask *cpumask, bool force)
{
        int cpu;
        unsigned long flags;
        unsigned int mtflags;
        unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
        volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

        /* timer balancing should be disabled in kernel code */
        BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

        LOCK_CORE(flags, mtflags);
        /* enable if any of each VPE's TCs require this IRQ */
        for_each_online_cpu(cpu) {
                if (cpumask_test_cpu(cpu, cpumask))
                        cic_mask[cpu] |= imask;
                else
                        cic_mask[cpu] &= ~imask;
        }
        UNLOCK_CORE(flags, mtflags);

        return 0;
}
#endif

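/* irq_chip callbacks used for all 32 CIC interrupt lines. */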
static struct irq_chip msp_cic_irq_controller = {
        .name = "MSP_CIC",
        .irq_mask = mask_cic_irq,
        .irq_mask_ack = msp_cic_irq_ack,
        .irq_unmask = unmask_cic_irq,
        .irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
        .irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};

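/*
 * Mask and clear all CIC sources, configure the external interrupt inputs
 * used for PCI, register the CIC irq_chip for its 32 interrupt lines and
 * initialize the PER interrupt sub-system.
 */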
void __init msp_cic_irq_init(void)
{
        int i;

        /* Mask/clear interrupts. */
        *CIC_VPE0_MSK_REG = 0x00000000;
        *CIC_VPE1_MSK_REG = 0x00000000;
        *CIC_STS_REG      = 0xFFFFFFFF;

        /*
         * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
         * These inputs map to EXT_INT_POL[6:4] inside the CIC.
         * They are to be active low, level sensitive.
         */
        *CIC_EXT_CFG_REG &= 0xFFFF8F8F;

        /* initialize all the IRQ descriptors */
        for (i = MSP_CIC_INTBASE; i < MSP_CIC_INTBASE + 32; i++) {
                irq_set_chip_and_handler(i, &msp_cic_irq_controller,
                                         handle_level_irq);
        }

        /* Initialize the PER interrupt sub-system */
        msp_per_irq_init();
}

/* The CIC is masked by CIC vector processing before dispatch is called */
void msp_cic_irq_dispatch(void)
{
        volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
        u32 cic_mask;
        u32 pending;
        int cic_status = *CIC_STS_REG;

        cic_mask = cic_msk_reg[get_current_vpe()];
        pending = cic_status & cic_mask;
        if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
                do_IRQ(MSP_INT_VPE0_TIMER);
        } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
                do_IRQ(MSP_INT_VPE1_TIMER);
        } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
                msp_per_irq_dispatch();
        } else if (pending) {
                do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
        } else {
                spurious_interrupt();
        }
}