#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/v7m.h>
#include <asm/exception.h>

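/*
 * Register offsets below are relative to the start of the NVIC register
 * block described by the device tree reg property (conventionally
 * 0xe000e100): the set-enable (ISER), clear-enable (ICER) and priority
 * (IPR) register banks.
 */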
#define NVIC_ISER		0x000
#define NVIC_ICER		0x080
#define NVIC_IPR		0x300

#define NVIC_MAX_BANKS		16

/*
 * Each bank handles 32 interrupts, except the last one, which covers only
 * the final 16 (the ARMv7-M architecture supports at most 496 external
 * interrupts).
 */
#define NVIC_MAX_IRQ		((NVIC_MAX_BANKS - 1) * 32 + 16)

static struct irq_domain *nvic_irq_domain;

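/*
 * Low-level interrupt entry point.  The ARMv7-M exception entry code passes
 * the hardware interrupt number (the active exception number minus 16);
 * translate it to the Linux virq and handle it.
 */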
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
	unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq);

	handle_IRQ(irq, regs);
}

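/*
 * A device tree interrupt specifier for the NVIC is a single cell holding
 * the interrupt number; no trigger type is encoded.
 */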
static int nvic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq, unsigned int *type)
{
	if (WARN_ON(fwspec->param_count < 1))
		return -EINVAL;
	*hwirq = fwspec->param[0];
	*type = IRQ_TYPE_NONE;
	return 0;
}

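/*
 * Allocate virqs in the linear domain and wire each one to the generic
 * chip that manages its 32-interrupt bank.
 */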
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		irq_map_generic_chip(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops nvic_irq_domain_ops = {
	.translate = nvic_irq_domain_translate,
	.alloc = nvic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

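/*
 * Illustrative device tree node matched by the "arm,armv7m-nvic" compatible
 * declared at the bottom of this file; register base, size and interrupt
 * cell count are SoC specific and shown here only as an example:
 *
 *	nvic: interrupt-controller@e000e100 {
 *		compatible = "arm,armv7m-nvic";
 *		reg = <0xe000e100 0xc00>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */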
static int __init nvic_of_init(struct device_node *node,
			       struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	unsigned int irqs, i, ret, numbanks;
	void __iomem *nvic_base;

	/* ICTR.INTLINESNUM is the number of 32-interrupt banks minus one */
	numbanks = (readl_relaxed(V7M_SCS_ICTR) &
		    V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;

	nvic_base = of_iomap(node, 0);
	if (!nvic_base) {
		pr_warn("unable to map nvic registers\n");
		return -ENOMEM;
	}

	irqs = numbanks * 32;
	if (irqs > NVIC_MAX_IRQ)
		irqs = NVIC_MAX_IRQ;

	nvic_irq_domain =
		irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);

	if (!nvic_irq_domain) {
		pr_warn("Failed to allocate irq domain\n");
		iounmap(nvic_base);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
					     "nvic_irq", handle_fasteoi_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		irq_domain_remove(nvic_irq_domain);
		iounmap(nvic_base);
		return ret;
	}

	/*
	 * One generic chip per bank of 32 interrupts; consecutive ISERn/ICERn
	 * registers are 4 bytes apart, hence the reg_base offset of 4 * i.
	 */
	for (i = 0; i < numbanks; ++i) {
		struct irq_chip_generic *gc;

		gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
		gc->reg_base = nvic_base + 4 * i;
		gc->chip_types[0].regs.enable = NVIC_ISER;
		gc->chip_types[0].regs.disable = NVIC_ICER;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
		gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
		/*
		 * End of interrupt is signalled by the exception return
		 * sequence, so eoi is a no-op here.
		 */
		gc->chip_types[0].chip.irq_eoi = irq_gc_noop;

		/* disable all interrupts in this bank */
		writel_relaxed(~0, gc->reg_base + NVIC_ICER);
	}

	/* Each IPR register holds four 8-bit priority fields; set all to 0 */
	for (i = 0; i < irqs; i += 4)
		writel_relaxed(0, nvic_base + NVIC_IPR + i);

	return 0;
}
IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);