/*
 * linux/arch/xtensa/kernel/irq.c
 *
 * Xtensa built-in interrupt controller and some generic functions copied
 * from i386.
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/irqchip.h>
#include <linux/irqchip/xtensa-mx.h>
#include <linux/irqchip/xtensa-pic.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#include <asm/mxregs.h>
#include <asm/uaccess.h>
#include <asm/platform.h>

DECLARE_PER_CPU(unsigned long, nmi_count);

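/*
 * Low-level interrupt dispatch: translate the hardware IRQ number
 * delivered by the core into its Linux IRQ number and hand it to the
 * generic IRQ layer.
 */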
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
        int irq = irq_find_mapping(NULL, hwirq);

        if (hwirq >= NR_IRQS) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
                                __func__, hwirq);
        }

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
                unsigned long sp;

                __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
                sp &= THREAD_SIZE - 1;

                if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
                        printk("Stack overflow in do_IRQ: %ld\n",
                               sp - sizeof(struct thread_info));
        }
#endif
        generic_handle_irq(irq);
}

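/*
 * Architecture-specific lines for /proc/interrupts: the IPI summary
 * on SMP and, when the fake-NMI feature is active, a per-CPU count of
 * non-maskable interrupts.
 */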
int arch_show_interrupts(struct seq_file *p, int prec)
{
        unsigned cpu __maybe_unused;
#ifdef CONFIG_SMP
        show_ipi_list(p, prec);
#endif
#if XTENSA_FAKE_NMI
        seq_printf(p, "%*s:", prec, "NMI");
        for_each_online_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
        seq_puts(p, "   Non-maskable interrupts\n");
#endif
        return 0;
}

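/*
 * Translate a device-tree interrupt specifier.  A single cell names a
 * core interrupt directly; with two cells, a second cell of 1 marks
 * the first cell as an external interrupt number, which is converted
 * to a core interrupt via xtensa_map_ext_irq().
 */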
int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
                unsigned long int_irq, unsigned long ext_irq,
                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 1 || intsize > 2))
                return -EINVAL;
        if (intsize == 2 && intspec[1] == 1) {
                int_irq = xtensa_map_ext_irq(ext_irq);
                if (int_irq < XCHAL_NUM_INTERRUPTS)
                        *out_hwirq = int_irq;
                else
                        return -EINVAL;
        } else {
                *out_hwirq = int_irq;
        }
        *out_type = IRQ_TYPE_NONE;
        return 0;
}

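/*
 * irq_domain map callback: choose a flow handler and IRQ_LEVEL status
 * for each hardware interrupt based on the core's configured
 * interrupt-type masks (software, edge, level, timer, profiling).
 */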
int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
                irq_hw_number_t hw)
{
        struct irq_chip *irq_chip = d->host_data;
        u32 mask = 1 << hw;

        if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_simple_irq, "level");
                irq_set_status_flags(irq, IRQ_LEVEL);
        } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_edge_irq, "edge");
                irq_clear_status_flags(irq, IRQ_LEVEL);
        } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_level_irq, "level");
                irq_set_status_flags(irq, IRQ_LEVEL);
        } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_percpu_irq, "timer");
                irq_clear_status_flags(irq, IRQ_LEVEL);
#ifdef XCHAL_INTTYPE_MASK_PROFILING
        } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_percpu_irq, "profiling");
                irq_set_status_flags(irq, IRQ_LEVEL);
#endif
        } else {
                /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
                /* XCHAL_INTTYPE_MASK_NMI */
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_level_irq, "level");
                irq_set_status_flags(irq, IRQ_LEVEL);
        }
        return 0;
}

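/*
 * Map an external IRQ number to its core interrupt: external IRQ N is
 * the Nth set bit (counting from bit 0) of the combined edge/level
 * external masks.  For example, with an external mask of 0b10110,
 * external IRQs 0, 1 and 2 map to core interrupts 1, 2 and 4;
 * out-of-range numbers yield XCHAL_NUM_INTERRUPTS.
 */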
unsigned xtensa_map_ext_irq(unsigned ext_irq)
{
        unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
        unsigned i;

        for (i = 0; mask; ++i, mask >>= 1) {
                if ((mask & 1) && ext_irq-- == 0)
                        return i;
        }
        return XCHAL_NUM_INTERRUPTS;
}

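/*
 * Inverse of xtensa_map_ext_irq(): the external IRQ number of a core
 * interrupt is the count of external interrupts occupying core
 * interrupt numbers below it.
 */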
unsigned xtensa_get_ext_irq_no(unsigned irq)
{
        unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
                ((1u << irq) - 1);
        return hweight32(mask);
}

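/*
 * Boot-time setup: probe the interrupt controller from the device
 * tree when CONFIG_OF is enabled, otherwise fall back to the legacy
 * MX (SMP-capable) or plain Xtensa PIC initialization, then bring up
 * IPIs and any variant-specific interrupts.
 */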
void __init init_IRQ(void)
{
#ifdef CONFIG_OF
        irqchip_init();
#else
#ifdef CONFIG_HAVE_SMP
        xtensa_mx_init_legacy(NULL);
#else
        xtensa_pic_init_legacy(NULL);
#endif
#endif

#ifdef CONFIG_SMP
        ipi_init();
#endif
        variant_init_irq();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
        unsigned int i, cpu = smp_processor_id();

        for_each_active_irq(i) {
                struct irq_data *data = irq_get_irq_data(i);
                struct cpumask *mask;
                unsigned int newcpu;

                if (irqd_is_per_cpu(data))
                        continue;

                mask = irq_data_get_affinity_mask(data);
                if (!cpumask_test_cpu(cpu, mask))
                        continue;

                newcpu = cpumask_any_and(mask, cpu_online_mask);

                if (newcpu >= nr_cpu_ids) {
                        pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
                                            i, cpu);

                        cpumask_setall(mask);
                }
                irq_set_affinity(i, mask);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */