linux/drivers/irqchip/irq-xtensa-mx.c
/*
 * Xtensa MX interrupt distributor
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>

#include <asm/mxregs.h>

#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3

static DEFINE_PER_CPU(unsigned int, cached_irq_mask);

static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
                irq_hw_number_t hw)
{
        if (hw < HW_IRQ_IPI_COUNT) {
                struct irq_chip *irq_chip = d->host_data;
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_percpu_irq, "ipi");
                irq_set_status_flags(irq, IRQ_LEVEL);
                return 0;
        }
        return xtensa_irq_map(d, irq, hw);
}
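
/*
 * hwirqs below HW_IRQ_IPI_COUNT are the IPI lines: they get the per-CPU
 * flow handler above, while every other hwirq falls through to the
 * generic xtensa_irq_map().
 */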

/*
 * Device Tree IRQ specifier translation function which works with one- or
 * two-cell bindings. The first cell value maps directly to the hwirq number.
 * The second cell, if present, specifies whether the hwirq number is
 * external (1) or internal (0); an illustrative example follows the
 * function below.
 */
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
                struct device_node *ctrlr,
                const u32 *intspec, unsigned int intsize,
                unsigned long *out_hwirq, unsigned int *out_type)
{
        return xtensa_irq_domain_xlate(intspec, intsize,
                        intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
                        out_hwirq, out_type);
}
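
/*
 * Illustrative specifiers only (hypothetical values, not taken from a real
 * board DT): "interrupts = <5 0>" requests internal hwirq 5 unchanged, while
 * "interrupts = <1 1>" marks the line as external, so xtensa_irq_domain_xlate()
 * is handed intspec[0] + HW_IRQ_EXTERN_BASE as the external candidate.
 */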

static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
        .xlate = xtensa_mx_irq_domain_xlate,
        .map = xtensa_mx_irq_map,
};

void secondary_init_irq(void)
{
        __this_cpu_write(cached_irq_mask,
                        XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                        XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
        set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                        XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}
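
/*
 * secondary_init_irq() is deliberately non-static: besides running at the end
 * of the init paths below, it is presumably also invoked on each secondary
 * CPU during SMP bring-up so that the per-CPU cached_irq_mask and INTENABLE
 * start out with all external edge/level interrupts enabled.
 */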

static void xtensa_mx_irq_mask(struct irq_data *d)
{
        unsigned int mask = 1u << d->hwirq;

        if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                                XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
                set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
                                        HW_IRQ_MX_BASE), MIENG);
        } else {
                mask = __this_cpu_read(cached_irq_mask) & ~mask;
                __this_cpu_write(cached_irq_mask, mask);
                set_sr(mask, intenable);
        }
}

static void xtensa_mx_irq_unmask(struct irq_data *d)
{
        unsigned int mask = 1u << d->hwirq;

        if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                                XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
                set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
                                        HW_IRQ_MX_BASE), MIENGSET);
        } else {
                mask |= __this_cpu_read(cached_irq_mask);
                __this_cpu_write(cached_irq_mask, mask);
                set_sr(mask, intenable);
        }
}
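
/*
 * Masking summary for the two helpers above: external edge/level interrupts
 * are gated in the MX distributor (the bit goes to MIENG to mask and to
 * MIENGSET to unmask), while all other interrupts are masked per CPU through
 * cached_irq_mask and the INTENABLE special register.
 */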

static void xtensa_mx_irq_enable(struct irq_data *d)
{
        variant_irq_enable(d->hwirq);
        xtensa_mx_irq_unmask(d);
}

static void xtensa_mx_irq_disable(struct irq_data *d)
{
        xtensa_mx_irq_mask(d);
        variant_irq_disable(d->hwirq);
}

static void xtensa_mx_irq_ack(struct irq_data *d)
{
        set_sr(1 << d->hwirq, intclear);
}

static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
        set_sr(1 << d->hwirq, intset);
        return 1;
}

static int xtensa_mx_irq_set_affinity(struct irq_data *d,
                const struct cpumask *dest, bool force)
{
        unsigned int mask = 1u << cpumask_any_and(dest, cpu_online_mask);

        set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
        return 0;
}
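
/*
 * Affinity handling above picks one online CPU from the requested mask via
 * cpumask_any_and() and writes only that CPU's bit to the MIROUT routing
 * register of the external line, so as written the interrupt is delivered
 * to a single core at a time.
 */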

static struct irq_chip xtensa_mx_irq_chip = {
        .name           = "xtensa-mx",
        .irq_enable     = xtensa_mx_irq_enable,
        .irq_disable    = xtensa_mx_irq_disable,
        .irq_mask       = xtensa_mx_irq_mask,
        .irq_unmask     = xtensa_mx_irq_unmask,
        .irq_ack        = xtensa_mx_irq_ack,
        .irq_retrigger  = xtensa_mx_irq_retrigger,
        .irq_set_affinity = xtensa_mx_irq_set_affinity,
};

int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
        struct irq_domain *root_domain =
                irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
                                &xtensa_mx_irq_domain_ops,
                                &xtensa_mx_irq_chip);
        irq_set_default_host(root_domain);
        secondary_init_irq();
        return 0;
}
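
/*
 * In the legacy path above, irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, ...)
 * creates a domain of NR_IRQS - 1 interrupts with Linux IRQ 1 mapped to
 * hwirq 0, so Linux IRQ 0 stays unused; the DT path below uses a linear
 * domain keyed by the "cdns,xtensa-mx" node instead.
 */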

static int __init xtensa_mx_init(struct device_node *np,
                struct device_node *interrupt_parent)
{
        struct irq_domain *root_domain =
                irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
                                &xtensa_mx_irq_chip);
        irq_set_default_host(root_domain);
        secondary_init_irq();
        return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);